diff --git a/mention-pair-classifier.ipynb b/mention-pair-classifier.ipynb new file mode 100644 index 0000000..7b9e01b --- /dev/null +++ b/mention-pair-classifier.ipynb @@ -0,0 +1,285 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from keras.models import Model\n", + "from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization\n", + "from keras.optimizers import SGD, Adam\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Data preparation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "filename = 'input_data.csv'\n", + "# np.loadtxt accepts a filename directly and closes the file itself (no leaked handle)\n", + "data = np.loadtxt(filename, delimiter='\\t')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "print(data.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "Our dataset consists of ~466K examples (pairs of mentions), each example described by 1126 features. Labels say whether a pair belongs to the same cluster (1) or not (0)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "size_of_dataset = len(data)\n", + "number_of_features = 1126\n", + "\n", + "X = data[:,0:number_of_features]\n", + "Y = data[:,number_of_features] #last column consists of labels\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "Now let's split data into training and test set (90/10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "np.random.seed(999) #seed fixed for reproducibility\n", + "mask = np.random.rand(size_of_dataset) < 0.9 #array of boolean variables\n", + "\n", + "training_set = X[mask]\n", + "training_labels = Y[mask]\n", + "\n", + "test_set = X[~mask]\n", + "test_labels = Y[~mask]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Neural network configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "inputs = Input(shape=(number_of_features,))\n", + "output_from_1st_layer = Dense(1000, activation='relu')(inputs)\n", + "output_from_1st_layer = Dropout(0.5)(output_from_1st_layer)\n", + "output_from_1st_layer = BatchNormalization()(output_from_1st_layer)\n", + "output_from_2nd_layer = Dense(500, activation='relu')(output_from_1st_layer)\n", + "output_from_2nd_layer = Dropout(0.5)(output_from_2nd_layer)\n", + "output_from_2nd_layer = BatchNormalization()(output_from_2nd_layer)\n", + "output = Dense(1, activation='sigmoid')(output_from_2nd_layer)\n", + "\n", + "model = Model(inputs, output)\n", + "model.compile(optimizer='Adam',loss='binary_crossentropy',metrics=['accuracy'])" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "# Training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "model.fit(training_set, training_labels, batch_size=256, epochs=25)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "source": [ + "# Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true, + "scrolled": true + }, + "outputs": [], + "source": [ + "scores = model.evaluate(test_set, test_labels)\n", + "print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Playing with the model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "You can save the weights of the model to a file and later recreate the model without training by model.load_weights(\"my_weights.h5\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "model.save_weights(\"my_weights.h5\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To have predictions for a test set we do" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "predictions = model.predict(test_set)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "and for a single example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "single_example = test_set[4:5,:] #example number 5 from the test 
set\n", + "prediction = model.predict(single_example)\n", + "print('%.8f' % prediction[0])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}