From 87c8a5217622d56975dd60990bde7a6e7ebd9ea5 Mon Sep 17 00:00:00 2001
From: Pawel Morawiecki <pawel.morawiecki@gmail.com>
Date: Thu, 27 Apr 2017 12:53:51 +0200
Subject: [PATCH] 10-fold cross validation of the model

---
 cross_validation.ipynb | 207 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 207 insertions(+), 0 deletions(-)
 create mode 100644 cross_validation.ipynb

diff --git a/cross_validation.ipynb b/cross_validation.ipynb
new file mode 100644
index 0000000..b861e78
--- /dev/null
+++ b/cross_validation.ipynb
@@ -0,0 +1,207 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "from keras.models import Model\n",
+    "from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization\n",
+    "from keras.optimizers import SGD, Adam\n",
+    "import numpy as np\n",
+    "from sklearn.model_selection import StratifiedKFold"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "# Preparing data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "filename = 'input_data.csv'\n",
+    "raw_data = open(filename, 'rt')\n",
+    "data = np.loadtxt(raw_data, delimiter= '\\t')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "print data.shape"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Our dataset consists of ~466K examples (pairs of mentions), each example described by 1126 features. Labels say whether a pair belongs to the same cluster (1) or not (0)."
+   ]
+  },
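+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "As an illustrative sanity check (an added sketch, not part of the original pipeline): StratifiedKFold preserves the 0/1 label ratio in every fold, so it can be worth inspecting the class balance of the label column before splitting."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch: count how many examples carry label 0 and label 1\n",
+    "# (the label sits in the last column of the loaded data)\n",
+    "labels, counts = np.unique(data[:, -1], return_counts=True)\n",
+    "print(dict(zip(labels, counts)))"
+   ]
+  },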
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "size_of_dataset = 466852\n",
+    "number_of_features = 1126\n",
+    "\n",
+    "X = data[:,0:1126]\n",
+    "Y = data[:,1126] #last column consists of labels\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "# 10-fold cross validation of the neural network model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "seed = 1\n",
+    "kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\n",
+    "cvscores = []\n",
+    "precision_scores = []\n",
+    "recall_scores = []\n",
+    "f1_scores = []\n",
+    "\n",
+    "for train, test in kfold.split(X, Y):\n",
+    "\n",
+    "    inputs = Input(shape=(number_of_features,))\n",
+    "    output_from_1st_layer = Dense(1000, activation='relu')(inputs)\n",
+    "    output_from_1st_layer = Dropout(0.5)(output_from_1st_layer)\n",
+    "    output_from_1st_layer = BatchNormalization()(output_from_1st_layer)\n",
+    "    output_from_2nd_layer = Dense(500, activation='relu')(output_from_1st_layer)\n",
+    "    output_from_2nd_layer = Dropout(0.5)(output_from_2nd_layer)\n",
+    "    output_from_2nd_layer = BatchNormalization()(output_from_2nd_layer)\n",
+    "    output = Dense(1, activation='sigmoid')(output_from_2nd_layer)\n",
+    "\n",
+    "    model = Model(inputs, output)\n",
+    "    model.compile(optimizer='Adam',loss='binary_crossentropy',metrics=['accuracy'])\n",
+    "    model.fit(X[train], Y[train], batch_size=256, nb_epoch=25)\n",
+    "   \n",
+    "    # evaluate the model\n",
+    "    scores = model.evaluate(X[test], Y[test])\n",
+    "    print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n",
+    "    cvscores.append(scores[1] * 100)\n",
+    "\n",
+    "    #calculate other metrics: precision, recall, f1\n",
+    "    predictions = model.predict(X[test])\n",
+    "    true_positives = 0.0\n",
+    "    false_positives = 0.0\n",
+    "    true_negatives = 0.0\n",
+    "    false_negatives = 0.0\n",
+    "\n",
+    "    for i in range(len(X[test])):\n",
+    "        if (predictions[i]<0.5 and Y[test][i]==0): true_negatives += 1     \n",
+    "        if (predictions[i]<0.5 and Y[test][i]==1): false_negatives += 1\n",
+    "        if (predictions[i]>=0.5 and Y[test][i]==1): true_positives += 1\n",
+    "        if (predictions[i]>=0.5 and Y[test][i]==0): false_positives += 1  \n",
+    "    \n",
+    "    precision = true_positives/(true_positives+false_positives)\n",
+    "    recall = true_positives/(true_positives+false_negatives)\n",
+    "    f1 = 2*(precision*recall)/(precision+recall)\n",
+    "\n",
+    "    precision_scores.append(precision)\n",
+    "    recall_scores.append(recall)\n",
+    "    f1_scores.append(f1)\n",
+    "\n",
+    "    print ('Precision: ' + repr(precision))\n",
+    "    print ('Recall: ' + repr(recall))\n",
+    "    print ('F1: ' + repr(f1))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "# Summary"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "print(\"%.2f%% (+/- %.2f%%)\" % (np.mean(cvscores), np.std(cvscores)))\n",
+    "print(\"%.2f%% (+/- %.2f%%)\" % (np.mean(precision_scores), np.std(precision_scores)))\n",
+    "print(\"%.2f%% (+/- %.2f%%)\" % (np.mean(recall_scores), np.std(recall_scores)))\n",
+    "print(\"%.2f%% (+/- %.2f%%)\" % (np.mean(f1_scores), np.std(f1_scores)))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
--
libgit2 0.22.2