diff --git a/Experiments/.idea/.name b/Experiments/.idea/.name
new file mode 100644
index 0000000..930b7b7
--- /dev/null
+++ b/Experiments/.idea/.name
@@ -0,0 +1 @@
+SML-Homework
\ No newline at end of file
diff --git a/Experiments/.idea/SML-Homework.iml b/Experiments/.idea/SML-Homework.iml
new file mode 100644
index 0000000..6711606
--- /dev/null
+++ b/Experiments/.idea/SML-Homework.iml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Experiments/.idea/codeStyleSettings.xml b/Experiments/.idea/codeStyleSettings.xml
new file mode 100644
index 0000000..7706eb7
--- /dev/null
+++ b/Experiments/.idea/codeStyleSettings.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Experiments/.idea/misc.xml b/Experiments/.idea/misc.xml
new file mode 100644
index 0000000..5bbe586
--- /dev/null
+++ b/Experiments/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/Experiments/.idea/modules.xml b/Experiments/.idea/modules.xml
new file mode 100644
index 0000000..41e1962
--- /dev/null
+++ b/Experiments/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Experiments/.idea/workspace.xml b/Experiments/.idea/workspace.xml
new file mode 100644
index 0000000..b97094d
--- /dev/null
+++ b/Experiments/.idea/workspace.xml
@@ -0,0 +1,409 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ DEFINITION_ORDER
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ AngularJS
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1509981910357
+
+
+ 1509981910357
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Experiments/learningmethod/__init__.py b/Experiments/learningmethod/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Experiments/learningmethod/__pycache__/__init__.cpython-35.pyc b/Experiments/learningmethod/__pycache__/__init__.cpython-35.pyc
new file mode 100644
index 0000000..3ab9b96
Binary files /dev/null and b/Experiments/learningmethod/__pycache__/__init__.cpython-35.pyc differ
diff --git a/Experiments/learningmethod/example.py b/Experiments/learningmethod/example.py
new file mode 100644
index 0000000..32af88f
--- /dev/null
+++ b/Experiments/learningmethod/example.py
@@ -0,0 +1,40 @@
+from sklearn.datasets import load_iris
+from sklearn.feature_extraction.text import CountVectorizer
+import pandas
+
+# Example of dataset
+
+iris = load_iris()
+
+featureMatrix = iris.data
+labelVector = iris.target
+
+print(featureMatrix.shape)
+print(labelVector.shape)
+print(iris.feature_names)
+print(featureMatrix[1])
+
+# ----------------------#
+# Text training example #
+# ----------------------#
+
+print("---")
+
+dataset = ["call you tonight", "Call me a cab", "please call me... PLEASE!"]
+vector = CountVectorizer()
+
+# Learn the "vocabulary" of the training data (occurs in-place)
+vector.fit(dataset)
+featureNames = vector.get_feature_names()
+
+print(featureNames)
+
+# Transform training data into a "document-term matrix"
+documentTermMatrix = vector.transform(dataset)
+
+# Convert the sparse matrix to a dense array once; reuse it (and featureNames) below.
+denseMatrix = documentTermMatrix.toarray()
+
+# examine the vocabulary and document-term matrix together
+df = pandas.DataFrame(denseMatrix, columns=featureNames)
+print(df.head())
diff --git a/Experiments/learningmethod/experimentOne.py b/Experiments/learningmethod/experimentOne.py
new file mode 100644
index 0000000..d3c54ea
--- /dev/null
+++ b/Experiments/learningmethod/experimentOne.py
@@ -0,0 +1,137 @@
+from sklearn.model_selection import train_test_split
+
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.linear_model import LogisticRegression
+from sklearn import metrics
+from sklearn.neural_network import MLPClassifier
+
+import pandas
+from pandas import DataFrame
+
+import os
+
+workspace = "/home/toshuumilia/Workspace/SML/" # Insert the working directory here.
+datasetPath = workspace + "data/sms.tsv" # Location of the dataset
+experimentOnePath = workspace + "results/experimentOne.csv" # Result location; must match the makedirs() below and showGraph.py
+
+
+smsDF = pandas.read_table(datasetPath, header=None, names=["label", "message"])
+smsDF["label_numerical"] = smsDF.label.map({"ham": 0, "spam": 1})
+
+smsDataset = smsDF.message
+smsLabel = smsDF.label_numerical
+
+methodArray = []
+measureArray = []
+valueArray = []
+
+# Run 15 rounds with different train/test splits so the metrics can be averaged.
+for x in range(0, 15):
+    # Create the datasets and the labels used for the ML.
+    # Seed the split with the round index: a fixed seed would make every round's split identical.
+    dataset_train, dataset_test, label_train, label_test = train_test_split(smsDataset, smsLabel, random_state=x)
+
+    # Note: DTM=documentTermMatrix
+    vectorizer = CountVectorizer()
+    trainDTM = vectorizer.fit_transform(dataset_train)
+    testDTM = vectorizer.transform(dataset_test)
+
+    # DECISION TREE
+    # TODO: Explore which parameters could be used.
+    # SEE: http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
+    decisionTree = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None,
+                                          min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
+                                          max_features=None, random_state=None, max_leaf_nodes=None,
+                                          min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None,
+                                          presort=False)
+    decisionTree.fit(trainDTM, label_train)
+
+    label_predicted = decisionTree.predict(testDTM)
+
+    # SEE: https://en.wikipedia.org/wiki/Precision_and_recall
+    valueArray.append(metrics.precision_score(label_test, label_predicted))
+    measureArray.append("precision")
+    methodArray.append("Decision Tree")
+
+    valueArray.append(metrics.recall_score(label_test, label_predicted))
+    measureArray.append("recall")
+    methodArray.append("Decision Tree")
+
+    valueArray.append(metrics.accuracy_score(label_test, label_predicted))
+    measureArray.append("accuracy")
+    methodArray.append("Decision Tree")
+
+    valueArray.append(metrics.f1_score(label_test, label_predicted))
+    measureArray.append("f1score")
+    methodArray.append("Decision Tree")
+
+    # LOGISTIC REGRESSION
+    # TODO: Explore which parameters could be used.
+    # SEE: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
+    logisticRegression = LogisticRegression(penalty='l2', dual=False, tol=0.0001,
+                                            C=1.0, fit_intercept=True, intercept_scaling=1,
+                                            class_weight=None, random_state=None, solver='liblinear',
+                                            max_iter=100, multi_class='ovr', verbose=0,
+                                            warm_start=False, n_jobs=1)
+    logisticRegression.fit(trainDTM, label_train)
+
+    label_predicted = logisticRegression.predict(testDTM)
+
+    valueArray.append(metrics.precision_score(label_test, label_predicted))
+    measureArray.append("precision")
+    methodArray.append("Logistic Regression")
+
+    valueArray.append(metrics.recall_score(label_test, label_predicted))
+    measureArray.append("recall")
+    methodArray.append("Logistic Regression")
+
+    valueArray.append(metrics.accuracy_score(label_test, label_predicted))
+    measureArray.append("accuracy")
+    methodArray.append("Logistic Regression")
+
+    valueArray.append(metrics.f1_score(label_test, label_predicted))
+    measureArray.append("f1score")
+    methodArray.append("Logistic Regression")
+
+    # NEURAL NETWORK
+    # SEE: http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
+    neuralNetwork = MLPClassifier(hidden_layer_sizes=(5,), activation='relu', solver='adam',
+                                  alpha=0.0001, batch_size='auto', learning_rate='constant',
+                                  learning_rate_init=0.001, power_t=0.5, max_iter=200,
+                                  shuffle=True, random_state=None, tol=0.0001,
+                                  verbose=False, warm_start=False, momentum=0.9,
+                                  nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1,
+                                  beta_1=0.9, beta_2=0.999, epsilon=1e-08)
+
+    neuralNetwork.fit(trainDTM, label_train)
+
+    label_predicted = neuralNetwork.predict(testDTM)
+
+    valueArray.append(metrics.precision_score(label_test, label_predicted))
+    measureArray.append("precision")
+    methodArray.append("Neural Network")
+
+    valueArray.append(metrics.recall_score(label_test, label_predicted))
+    measureArray.append("recall")
+    methodArray.append("Neural Network")
+
+    valueArray.append(metrics.accuracy_score(label_test, label_predicted))
+    measureArray.append("accuracy")
+    methodArray.append("Neural Network")
+
+    valueArray.append(metrics.f1_score(label_test, label_predicted))
+    measureArray.append("f1score")
+    methodArray.append("Neural Network")
+
+    print("Step", x, "done.")
+
+experimentOneDF = DataFrame()
+experimentOneDF["measure"] = measureArray
+experimentOneDF["value"] = valueArray
+experimentOneDF["method"] = methodArray
+
+if not os.path.exists(workspace + "results/"):
+    os.makedirs(workspace + "results/")
+
+experimentOneDF.to_csv(experimentOnePath)
diff --git a/Experiments/learningmethod/showGraph.py b/Experiments/learningmethod/showGraph.py
new file mode 100644
index 0000000..c72ccb6
--- /dev/null
+++ b/Experiments/learningmethod/showGraph.py
@@ -0,0 +1,20 @@
+import matplotlib.pyplot as pyplot
+import seaborn
+
+import pandas
+
+workspace = "/home/toshuumilia/Workspace/SML/" # Insert the working directory here.
+datasetPath = workspace + "data/sms.tsv" # Tells where is located the data
+experimentOnePath = workspace + "results/experimentOne.csv" # Location of the first experiment result
+globalFigsize = (15, 6) # Graphs parameters
+
+experimentOneDF = pandas.read_csv(experimentOnePath)  # NOTE(review): expects columns measure/value/method; presumably written by experimentOne.py — confirm it writes into results/
+
+seaborn.set_style("whitegrid")  # grid background makes bar heights easier to compare
+pyplot.figure(figsize=globalFigsize)
+seaborn.barplot(x="measure", y="value", hue="method",
+                data=experimentOneDF, palette="Blues_d")  # one bar group per measure, one hue per learning method
+pyplot.ylabel('value', fontsize=12)
+pyplot.xlabel('measure', fontsize=12)
+pyplot.title('Insert Title', fontsize=15)  # TODO: placeholder title — set a real one
+pyplot.show()