adding main.py and name_entity.py to a new branch for sentiment anali… #17

Open
wants to merge 1 commit into base: master
73 changes: 73 additions & 0 deletions main.py
@@ -0,0 +1,73 @@
import glob2
import nltk
import string
import scipy
from collections import Counter
from nltk.corpus import stopwords
from nltk.stem.porter import *
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np  # a conventional alias
import sklearn.feature_extraction.text as text
import gensim
from gensim import corpora
from nltk.stem.wordnet import WordNetLemmatizer
import re
import dateparser
from name_entity import *

# shared resources for clean(): stopword list, punctuation set, lemmatizer
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()


def clean(doc):
    # lowercase, drop stopwords, strip punctuation, then lemmatize each token
    stop_free = " ".join([i for i in doc.lower().split() if i not in stop])
    punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
    normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
    return normalized


def main():
    for folder_name in glob2.glob("DentonDD2/20*/"):
        folder_items = glob2.glob("%s*.txt" % folder_name)
        print(folder_name, ":", len(folder_items))
        # read every .txt file in the folder, skipping stopword/xfiles helpers
        doc_complete = []
        for filename in folder_items:
            if not re.findall("(stopword|xfiles)", filename):
                with open(filename, encoding="ascii", errors="ignore") as f:
                    doc_complete.append(f.read())
        # clean each document and keep only tokens longer than two characters
        doc_clean = [[token for token in clean(doc).split() if len(token) > 2]
                     for doc in doc_complete]
        dictionary = corpora.Dictionary(doc_clean)
        doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
        Lda = gensim.models.ldamodel.LdaModel
        ldamodel = Lda(doc_term_matrix, num_topics=4,
                       id2word=dictionary, passes=1)
        topics = ldamodel.print_topics()
        for index, doc in enumerate(doc_clean):
            # assign the document to the topic whose terms it contains most often
            topic_max = None
            topic_value = 0
            col = Counter(doc)
            for topic in topics:
                sum_of_terms = 0
                for word in re.findall(r'"(\w+)"', topic[1]):
                    if word in col:
                        sum_of_terms += col[word]
                if sum_of_terms >= topic_value:
                    topic_value = sum_of_terms
                    topic_max = topic
            print(topic_max)
            # keep only the sentences that mention one of the topic's terms
            sentences = nltk.sent_tokenize(doc_complete[index])
            regex = "|".join(re.findall(r'"(\w+)"', topic_max[1]))
            important_sentences = [sentence for sentence in sentences
                                   if re.findall(regex, sentence)]
            print(len(important_sentences))
            # collect named entities and any parseable dates from those sentences
            name_entities = set()
            date_time = set()
            for sentence in important_sentences:
                name_entities.update(get_name_entities(sentence))
                date_time.add(dateparser.parse(sentence))


if __name__ == "__main__":
    main()
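A minimal setup sketch, assuming current NLTK resource names: it downloads the corpora and models these two scripts rely on (sentence and word tokenization, POS tagging, the binary NE chunker, stopwords, and WordNet for lemmatization); the non-NLTK dependencies come straight from the imports above (glob2, gensim, scikit-learn, scipy, numpy, dateparser).

import nltk

# One-time downloads; resource names assume a current NLTK release.
for resource in ("punkt",                       # sent_tokenize / word_tokenize
                 "stopwords",                   # stopwords.words('english')
                 "wordnet",                     # WordNetLemmatizer
                 "averaged_perceptron_tagger",  # nltk.pos_tag
                 "maxent_ne_chunker",           # nltk.ne_chunk_sents
                 "words"):                      # lexicon used by the NE chunker
    nltk.download(resource)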
22 changes: 22 additions & 0 deletions name_entity.py
@@ -0,0 +1,22 @@
import nltk

def extract_entity_names(t):
    # recursively collect the leaves of every 'NE' subtree in an NLTK chunk tree
    entity_names = []

    if hasattr(t, 'label') and t.label:
        if t.label() == 'NE':
            entity_names.append(' '.join([child[0] for child in t]))
        else:
            for child in t:
                entity_names.extend(extract_entity_names(child))

    return entity_names


def get_name_entities(sample):
    # sentence-split, tokenize, and POS-tag the text, then run NLTK's binary
    # named-entity chunker and flatten the resulting trees into a set of names
    sentences = nltk.sent_tokenize(sample)
    tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
    tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
    chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
    entity_names = []
    for tree in chunked_sentences:
        entity_names.extend(extract_entity_names(tree))
    return set(entity_names)
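A minimal usage sketch for get_name_entities; the sample sentence is invented, and it assumes the NLTK resources from the setup sketch above have already been downloaded.

from name_entity import get_name_entities

# With binary=True the chunker marks generic 'NE' spans, so the result is a set
# of surface strings, e.g. something like {'Barack Obama', 'Hawaii', 'Chicago'}.
sample = "Barack Obama was born in Hawaii and later moved to Chicago."
print(get_name_entities(sample))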