import nltk
import pandas as pd
import string
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn import metrics
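# The custom tokenizer defined later relies on NLTK's English stopword list;
# this download is a no-op if the corpus is already present.
nltk.download("stopwords", quiet=True)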
complaints = pd.read_csv("Data/complaint1700.csv")
noncomplaints = pd.read_csv("Data/noncomplaint1700.csv")
complaints['category'] = "complaints"
noncomplaints['category'] = "noncomplaints"
df = pd.concat([complaints, noncomplaints], axis=0, ignore_index=True)
df.head()
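# Quick sanity check: the combined frame should hold 1700 tweets per class
print(df.shape)
print(df['category'].value_counts())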
X = df['tweet']
y = df['category']
# Bag-of-words baseline without TF-IDF weighting
cv = CountVectorizer()
X = cv.fit_transform(X)
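# fit_transform returns a sparse document-term matrix: one row per tweet,
# one column per vocabulary term learned by the CountVectorizer
print(X.shape)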
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
nb.fit(X_train,y_train)
predictions = nb.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test, predictions))  # best-performing model
from sklearn.naive_bayes import ComplementNB
cnb = ComplementNB()
cnb.fit(X_train,y_train)
predictions = cnb.predict(X_test)
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test,predictions))
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
rf = RandomForestClassifier()
rf.fit(X_train,y_train)
rf_predictions = rf.predict(X_test)
print(confusion_matrix(y_test,rf_predictions))
print('\n')
print(classification_report(y_test,rf_predictions))
# 5-fold grid search over tree count and depth, scored by macro-averaged precision
param_grid = {'n_estimators': [5, 10, 20, 30],
              'max_depth': [1, 3, 5, 7, 9, 11, 15]}
grid_search = GridSearchCV(rf, param_grid, cv=5, scoring='precision_macro')
grid_search.fit(X_train,y_train)
print("Best Hyperparameter is:")
print(grid_search.best_params_)
print("Results are:")
means = grid_search.cv_results_['mean_test_score']
for mean, params in zip(means, grid_search.cv_results_['params']):
    print("%0.3f for %r" % (mean, params))
from sklearn.svm import SVC
import warnings
warnings.filterwarnings("ignore")
tuned_parameters = [
    {'kernel': ['linear'], 'C': [0.01, 0.1, 1.0, 5.0, 10.0, 100.0]}]
clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring='precision_macro')
clf.fit(X_train, y_train)
print("Best Hyperparameter is:")
print(clf.best_params_)
print("Result is:")
means = clf.cv_results_['mean_test_score']
for mean, params in zip(means, clf.cv_results_['params']):
    print("%0.3f for %r" % (mean, params))
tuned_parameters = [
    {'kernel': ['rbf'], 'gamma': [0.0001, 0.001, 0.1, 1, 10, 100], 'C': [0.1, 1, 10, 100, 1000]}]
# Objective metric: macro-averaged precision
clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring='precision_macro')
clf.fit(X_train, y_train)
print("Best Hyperparameters are:")
print(clf.best_params_)
print("Results are:")
means = clf.cv_results_['mean_test_score']
for mean, params in zip(means, clf.cv_results_['params']):
    print("%0.3f for %r" % (mean, params))
tuned_parameters = [
    {'kernel': ['poly'], 'degree': [0, 1, 2, 5], 'C': [0.01, 0.1, 1.0, 5.0, 10.0, 100.0]}]
# Objective metric: macro-averaged precision
clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring='precision_macro')
clf.fit(X_train, y_train)
print("Best Hyperparameter is:")
print(clf.best_params_)
print("Results are:")
means = clf.cv_results_['mean_test_score']
for mean, params in zip(means, clf.cv_results_['params']):
    print("%0.3f for %r" % (mean, params))
svc = SVC(kernel='rbf', C=100, gamma=0.001)  # RBF hyperparameters selected from the grid search above
svc.fit(X_train, y_train)
svc_predictions = svc.predict(X_test)
print(confusion_matrix(y_test,svc_predictions))
print('\n')
print(classification_report(y_test,svc_predictions))
def tokenizer(message):
    # Strip punctuation, then drop English stopwords
    noPunct = [char for char in message if char not in string.punctuation]
    noPunct = "".join(noPunct)
    return [word for word in noPunct.split() if word.lower() not in stopwords.words("english")]
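# Quick check of the tokenizer on a made-up example tweet:
tokenizer("United lost my bag again, unbelievable!")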
from sklearn.pipeline import Pipeline
# Naive Bayes pipeline: bag-of-words counts -> TF-IDF weighting -> MultinomialNB
pipeline = Pipeline([
    ('bow', CountVectorizer(analyzer=tokenizer)),
    ('tfidf', TfidfTransformer()),
    ('classifier', MultinomialNB())
])
X = df['tweet']
y = df['category']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
pipeline.fit(X_train, y_train)
tfidf_predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,tfidf_predictions))
print('\n')
print(classification_report(y_test,tfidf_predictions))
# Random forest pipeline, reusing the tuned depth and tree count
pipeline = Pipeline([
    ('bow', CountVectorizer(analyzer=tokenizer)),
    ('tfidf', TfidfTransformer()),
    ('classifier', RandomForestClassifier(max_depth=15, n_estimators=30))
])
pipeline.fit(X_train, y_train)
tfidf_predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,tfidf_predictions))
print('\n')
print(classification_report(y_test,tfidf_predictions))
# SVC pipeline with the RBF hyperparameters found earlier
pipeline = Pipeline([
    ('bow', CountVectorizer(analyzer=tokenizer)),
    ('tfidf', TfidfTransformer()),
    ('classifier', SVC(kernel='rbf', C=100, gamma=0.001))
])
pipeline.fit(X_train, y_train)
tfidf_predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,tfidf_predictions))
print('\n')
print(classification_report(y_test,tfidf_predictions))
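# Optionally persist the fitted SVC pipeline for reuse; a minimal sketch
# using joblib (the filename is an assumption):
import joblib
joblib.dump(pipeline, "svc_pipeline.joblib")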
predictions[0]  # peek at one predicted label
import numpy as np
from pandas import DataFrame
myData = pd.read_csv("Data/Goke2.csv")
myData.head()
tweet = myData['tweet']
tweet = cv.transform(tweet)  # reuse the vocabulary learned from the training tweets
all_predictions = nb.predict(tweet)
all_predictions.shape
# Per-class log-probabilities are also available from the fitted model
log_proba = nb.predict_log_proba(tweet)
log_proba.shape
log_proba[0:3]
df_pred = DataFrame(data=all_predictions, columns=["prediction"], index=myData.index)
print(df_pred)
final_df = pd.merge(myData, df_pred, left_index=True, right_index=True, how="outer")
final_df.head()
non_negative = final_df[final_df['prediction'] == "noncomplaints"]
non_negative  # too many tweets flagged as non-complaints; retry with the SVC model, which scored best on the test set
tweet = cv.transform(myData['tweet'])  # SVC was trained on count features, so vectorize first
all_predictions = svc.predict(tweet)
df_pred = DataFrame(data=all_predictions, columns=["prediction"], index=myData.index)
print(df_pred)
final_df = pd.merge(myData, df_pred, left_index=True, right_index=True, how="outer")
non_negative = final_df[final_df['prediction'] == "noncomplaints"]
non_negative
pipe_predictions = pipeline.predict(myData['tweet'])  # most recently fitted pipeline (TF-IDF + SVC)
df_pred = DataFrame(data=pipe_predictions, columns=["prediction"], index=myData.index)
final_df = pd.merge(myData, df_pred, left_index=True, right_index=True, how="outer")
non_negative = final_df[final_df['prediction'] == "noncomplaints"]
non_negative
!pwd
non_negative.to_csv(r'/Users/kevweirikefe/Documents/Simon Classes/CIS 434 - Social Media Analytics/Goke.csv', index=None, header=True)
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
tweet = myData['tweet']
def sentiment_analyzer_scores(sentence):
    score = analyser.polarity_scores(sentence)
    print("{:-<40} {}".format(sentence, str(score)))
    return score

# polarity_scores expects a single string, so score each tweet in turn
for sentence in tweet:
    sentiment = sentiment_analyzer_scores(sentence)