diff --git a/Untitled4.ipynb b/Untitled4.ipynb
new file mode 100644
index 0000000..6d84f3f
--- /dev/null
+++ b/Untitled4.ipynb
@@ -0,0 +1,569 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "Untitled4.ipynb",
+ "version": "0.3.2",
+ "provenance": [],
+ "collapsed_sections": [],
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "2Bib0jQ5GSkv",
+ "colab_type": "code",
+ "outputId": "97774ca1-3787-41f0-d8d3-e33943913bb0",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 408
+ }
+ },
+ "source": [
+ "import argparse\n",
+ "'''\n",
+ "url = \"https://drive.google.com/open?id=15BdS8VGc6WXXW6Qyg1NMnaybvQFHQ7wt\"\n",
+ "import urllib,os\n",
+ "filename = \"hbhi.csv\"\n",
+ "if not os.path.isfile(filename):\n",
+ " urllib.request.urlretrieve(url, filename)\n",
+ "'''\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "from keras import backend as K\n",
+ "from keras.applications.vgg16 import VGG16\n",
+ "from keras.layers import Dense, Dropout, Input\n",
+ "from keras.models import Model\n",
+ "from keras.models import Sequential\n",
+ "from keras.optimizers import Adamax\n",
+ "from keras.utils import np_utils\n",
+ "from tensorflow.python.saved_model import builder as saved_model_builder\n",
+ "from tensorflow.python.saved_model import utils\n",
+ "from tensorflow.python.saved_model import tag_constants, signature_constants\n",
+ "from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def\n",
+ "from tensorflow.contrib.session_bundle import exporter\n",
+ "class Namespace:\n",
+ " def __init__(self, **kwargs):\n",
+ " self.__dict__.update(kwargs)\n",
+ "'''\n",
+ "parser = argparse.ArgumentParser()\n",
+ "parser.add_argument('--csv_file', type=str, default='hbhi.csv')\n",
+ "parser.add_argument('--export_path', type=str,default='model_out/')\n",
+ "# OPTIONAL\n",
+ "parser.add_argument('--batch_size', type=int, default=1)\n",
+ "parser.add_argument('--n_epochs', type=int, default=1)\n",
+ "parser.add_argument('--debug', dest='debug', action='store_true')\n",
+ "'''\n",
+ "#FLAGS = parser.parse_args()\n",
+ "# note: csv_file points at 'hi7.csv', which must exist locally; the recorded run below failed for exactly that reason\n",
+ "FLAGS = Namespace(batch_size=1, csv_file='hi7.csv', debug=False, export_path='model_o/', n_epochs=1)\n",
+ "\n",
+ "if (FLAGS.debug):\n",
+ " FLAGS.batch_size = 10\n",
+ " FLAGS.n_epochs = 1\n",
+ "\n",
+ "NUM_CLASSES = 2\n",
+ "IMG_SIZE = 48\n",
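+ "# NUM_CLASSES = 2 -> binary classification on 48x48 RGB inputs\n",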
+ "\n",
+ "# TODO: Use the 'Usage' field to separate based on training/testing\n",
+ "TRAIN_END = 2713\n",
+ "TEST_START = TRAIN_END + 1\n",
+ "\n",
+ "\n",
+ "def split_for_test(data):\n",
+ "    train = data[0:TRAIN_END]\n",
+ "    test = data[TEST_START:]\n",
+ " return train, test\n",
+ "\n",
+ "\n",
+ "def pandas_vector_to_list(pandas_df):\n",
+ " py_list = [item[0] for item in pandas_df.values.tolist()]\n",
+ " return py_list\n",
+ "\n",
+ "\n",
+ "def process_emotion(emotion):\n",
+ " \"\"\"\n",
+ "    Takes in a vector of emotion labels and outputs a list of one-hot vectors.\n",
+ "    :param emotion: vector of int labels in [0, NUM_CLASSES)\n",
+ "    :return: list of one-hot vectors (each of length NUM_CLASSES)\n",
+ " \"\"\"\n",
+ " emotion_as_list = pandas_vector_to_list(emotion)\n",
+ " y_data = []\n",
+ " for index in range(len(emotion_as_list)):\n",
+ " y_data.append(emotion_as_list[index])\n",
+ "\n",
+ " # Y data\n",
+ " y_data_categorical = np_utils.to_categorical(y_data, NUM_CLASSES)\n",
+ " return y_data_categorical\n",
+ "\n",
+ "\n",
+ "def process_pixels(pixels, img_size=IMG_SIZE):\n",
+ " \"\"\"\n",
+ "    Takes in a string (pixels) of space separated ints. Transforms the ints\n",
+ "    into a 48x48x3 matrix of floats (scaled by 1/255).\n",
+ "    :param pixels: string with space separated ints\n",
+ "    :param img_size: image size\n",
+ "    :return: array of 48x48x3 matrices\n",
+ " \"\"\"\n",
+ " pixels_as_list = pandas_vector_to_list(pixels)\n",
+ "\n",
+ "    np_image_array = []\n",
+ "    for index, item in enumerate(pixels_as_list):\n",
+ "        # split space separated ints and reshape into an img_size x img_size x 3 image\n",
+ "        pixel_data = item.split()\n",
+ "        arr = np.reshape(pixel_data, (img_size, img_size, 3))\n",
+ "        np_image_array.append(arr)\n",
+ "\n",
+ " np_image_array = np.array(np_image_array)\n",
+ " # convert to float and divide by 255\n",
+ " np_image_array = np_image_array.astype('float32') / 255.0\n",
+ " return np_image_array\n",
+ "\n",
+ "\n",
+ "def get_vgg16_output(vgg16, array_input, n_feature_maps):\n",
+ "    # run the images through VGG16; pooling='avg' yields one 512-dim vector per image\n",
+ "    picture_train_features = vgg16.predict(array_input)\n",
+ "\n",
+ " feature_map = np.empty([n_feature_maps, 512])\n",
+ " for idx_pic, picture in enumerate(picture_train_features):\n",
+ " feature_map[idx_pic] = picture\n",
+ " return feature_map\n",
+ "\n",
+ "\n",
+ "def duplicate_input_layer(array_input, size):\n",
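+ "    # note: tiles a grayscale image across 3 channels; appears unused in this cell\n",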
+ " vg_input = np.empty([size, 48, 48, 3])\n",
+ " for index, item in enumerate(vg_input):\n",
+ " item[:, :, 0] = array_input[index]\n",
+ " item[:, :, 1] = array_input[index]\n",
+ " item[:, :, 2] = array_input[index]\n",
+ " return vg_input\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ " # used to get the session/graph data from keras\n",
+ " K.set_learning_phase(0)\n",
+ " # get the data in a Pandas dataframe\n",
+ " raw_data = pd.read_csv(FLAGS.csv_file)\n",
+ "\n",
+ " # convert to one hot vectors\n",
+ " emotion_array = process_emotion(raw_data[['Emotion']])\n",
+ " # convert to a 48x48 float matrix\n",
+ " pixel_array = process_pixels(raw_data[['Pixel']])\n",
+ "\n",
+ " # split for test/train\n",
+ " y_train, y_test = split_for_test(emotion_array)\n",
+ " x_train_matrix, x_test_matrix = split_for_test(pixel_array)\n",
+ "\n",
+ " n_train = int(len(x_train_matrix))\n",
+ " n_test = int(len(x_test_matrix))\n",
+ "\n",
+ " x_train_input = x_train_matrix\n",
+ " x_test_input = x_test_matrix\n",
+ "\n",
+ " # vgg 16. include_top=False so the output is the 512 and use the learned weights\n",
+ " vgg16 = VGG16(include_top=False, input_shape=(48, 48, 3), pooling='avg', weights='imagenet')\n",
+ "\n",
+ " # get vgg16 outputs\n",
+ " x_train_feature_map = get_vgg16_output(vgg16, x_train_matrix, n_train)\n",
+ " x_test_feature_map = get_vgg16_output(vgg16, x_test_matrix, n_test)\n",
+ "\n",
+ " # build and train model\n",
+ " top_layer_model = Sequential()\n",
+ " top_layer_model.add(Dense(256, input_shape=(512,), activation='relu'))\n",
+ " top_layer_model.add(Dense(256, input_shape=(256,), activation='relu'))\n",
+ " top_layer_model.add(Dropout(0.75))\n",
+ " top_layer_model.add(Dense(64, input_shape=(256,)))\n",
+ " top_layer_model.add(Dense(NUM_CLASSES, activation='softmax'))\n",
+ "\n",
+ " adamax = Adamax()\n",
+ "\n",
+ " top_layer_model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=adamax, metrics=['accuracy'])\n",
+ "\n",
+ "    # train (note: validation_data reuses the training set, so val metrics only sanity-check the fit)\n",
+ "    top_layer_model.fit(x_train_feature_map, y_train,\n",
+ "                        validation_data=(x_train_feature_map, y_train),\n",
+ "                        epochs=FLAGS.n_epochs, batch_size=FLAGS.batch_size)\n",
+ " # Evaluate\n",
+ " score = top_layer_model.evaluate(x_test_feature_map,\n",
+ " y_test, batch_size=FLAGS.batch_size)\n",
+ "\n",
+ " print(\"After top_layer_model training (test set): {}\".format(score))\n",
+ "\n",
+ " # Merge two models and create the final_model_final_final\n",
+ " inputs = Input(shape=(48, 48, 3))\n",
+ " vg_output = vgg16(inputs)\n",
+ " print(\"vg_output: {}\".format(vg_output.shape))\n",
+ " # TODO: the 'pooling' argument of the VGG16 model is important for this to work otherwise you will have to squash\n",
+ " # output from (?, 1, 1, 512) to (?, 512)\n",
+ " model_predictions = top_layer_model(vg_output)\n",
+ "    final_model = Model(inputs=inputs, outputs=model_predictions)\n",
+ " final_model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=adamax, metrics=['accuracy'])\n",
+ " final_model_score = final_model.evaluate(x_train_input,\n",
+ " y_train, batch_size=FLAGS.batch_size)\n",
+ " print(\"Sanity check - final_model (train score): {}\".format(final_model_score))\n",
+ "\n",
+ " final_model_score = final_model.evaluate(x_test_input,\n",
+ " y_test, batch_size=FLAGS.batch_size)\n",
+ " print(\"Sanity check - final_model (test score): {}\".format(final_model_score))\n",
+ " # config = final_model.get_config()\n",
+ " # weights = final_model.get_weights()\n",
+ "\n",
+ " # probably don't need to create a new model\n",
+ " # model_to_save = Model.from_config(config)\n",
+ " # model_to_save.set_weights(weights)\n",
+ " model_to_save = final_model\n",
+ " model_to_save.save('path_to_my_model.h5')\n",
+ " print(\"Model input name: {}\".format(model_to_save.input))\n",
+ " print(\"Model output name: {}\".format(model_to_save.output))\n",
+ "\n",
+ " # Save Model\n",
+ " builder = saved_model_builder.SavedModelBuilder(FLAGS.export_path)\n",
+ " signature = predict_signature_def(inputs={'images': model_to_save.input},\n",
+ " outputs={'scores': model_to_save.output})\n",
+ " with K.get_session() as sess:\n",
+ " builder.add_meta_graph_and_variables(sess=sess,\n",
+ " tags=[tag_constants.SERVING],\n",
+ " signature_def_map={'predict': signature})\n",
+ " builder.save()\n",
+ " \n",
+ " \n",
+ "if __name__ == \"__main__\":\n",
+ " main()"
+ ],
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "FileNotFoundError",
+ "evalue": "ignored",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 231\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"__main__\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 232\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mK\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_learning_phase\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0;31m# get the data in a Pandas dataframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 145\u001b[0;31m \u001b[0mraw_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcsv_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 146\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0;31m# convert to one hot vectors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mparser_f\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)\u001b[0m\n\u001b[1;32m 700\u001b[0m skip_blank_lines=skip_blank_lines)\n\u001b[1;32m 701\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 702\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 703\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 704\u001b[0m \u001b[0mparser_f\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 427\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 428\u001b[0m \u001b[0;31m# Create the parser.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 429\u001b[0;31m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTextFileReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 430\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mchunksize\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0miterator\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'has_index_names'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'has_index_names'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 895\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_make_engine\u001b[0;34m(self, engine)\u001b[0m\n\u001b[1;32m 1120\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mengine\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'c'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1121\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'c'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1122\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mCParserWrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1123\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1124\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'python'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, src, **kwds)\u001b[0m\n\u001b[1;32m 1851\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'usecols'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0musecols\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1852\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1853\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mparsers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTextReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msrc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1854\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munnamed_cols\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munnamed_cols\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1855\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader.__cinit__\u001b[0;34m()\u001b[0m\n",
+ "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._setup_parser_source\u001b[0;34m()\u001b[0m\n",
+ "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] File b'hi7.csv' does not exist: b'hi7.csv'"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "ZqK4xeIiHb9l",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "source": [
+ "import argparse\n",
+ "'''\n",
+ "url = \"https://drive.google.com/open?id=15BdS8VGc6WXXW6Qyg1NMnaybvQFHQ7wt\"\n",
+ "import urllib,os\n",
+ "filename = \"hbhi.csv\"\n",
+ "if not os.path.isfile(filename):\n",
+ " urllib.request.urlretrieve(url, filename)\n",
+ "'''\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "from keras import backend as K\n",
+ "from keras.applications.vgg16 import VGG16\n",
+ "from keras.layers import Dense, Dropout, Input\n",
+ "from keras.models import Model\n",
+ "from keras.models import Sequential\n",
+ "from keras.optimizers import Adamax\n",
+ "from keras.utils import np_utils\n",
+ "from tensorflow.python.saved_model import builder as saved_model_builder\n",
+ "from tensorflow.python.saved_model import utils\n",
+ "from tensorflow.python.saved_model import tag_constants, signature_constants\n",
+ "from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def\n",
+ "from tensorflow.contrib.session_bundle import exporter\n",
+ "class Namespace:\n",
+ " def __init__(self, **kwargs):\n",
+ " self.__dict__.update(kwargs)\n",
+ "'''\n",
+ "parser = argparse.ArgumentParser()\n",
+ "parser.add_argument('--csv_file', type=str, default='hbhi.csv')\n",
+ "parser.add_argument('--export_path', type=str,default='model_out/')\n",
+ "# OPTIONAL\n",
+ "parser.add_argument('--batch_size', type=int, default=1)\n",
+ "parser.add_argument('--n_epochs', type=int, default=1)\n",
+ "parser.add_argument('--debug', dest='debug', action='store_true')\n",
+ "'''\n",
+ "#FLAGS = parser.parse_args()\n",
+ "FLAGS = Namespace(batch_size=1, csv_file='hi7.csv', debug=False, export_path='model_o/', n_epochs=1)\n",
+ "\n",
+ "if (FLAGS.debug):\n",
+ " FLAGS.batch_size = 10\n",
+ " FLAGS.n_epochs = 1\n",
+ "\n",
+ "NUM_CLASSES = 2\n",
+ "IMG_SIZE = 48\n",
+ "\n",
+ "# TODO: Use the 'Usage' field to separate based on training/testing\n",
+ "TRAIN_END = 2713\n",
+ "TEST_START = TRAIN_END + 1\n",
+ "\n",
+ "\n",
+ "def split_for_test(data):\n",
+ "    train = data[0:TRAIN_END]\n",
+ "    test = data[TEST_START:]\n",
+ " return train, test\n",
+ "\n",
+ "\n",
+ "def pandas_vector_to_list(pandas_df):\n",
+ " py_list = [item[0] for item in pandas_df.values.tolist()]\n",
+ " return py_list\n",
+ "\n",
+ "\n",
+ "def process_emotion(emotion):\n",
+ " \"\"\"\n",
+ "    Takes in a vector of emotion labels and outputs a list of one-hot vectors.\n",
+ "    :param emotion: vector of int labels in [0, NUM_CLASSES)\n",
+ "    :return: list of one-hot vectors (each of length NUM_CLASSES)\n",
+ " \"\"\"\n",
+ " emotion_as_list = pandas_vector_to_list(emotion)\n",
+ " y_data = []\n",
+ " for index in range(len(emotion_as_list)):\n",
+ " y_data.append(emotion_as_list[index])\n",
+ "\n",
+ " # Y data\n",
+ " y_data_categorical = np_utils.to_categorical(y_data, NUM_CLASSES)\n",
+ " return y_data_categorical\n",
+ "\n",
+ "\n",
+ "def process_pixels(pixels, img_size=IMG_SIZE):\n",
+ " \"\"\"\n",
+ "    Takes in a string (pixels) of space separated ints. Transforms the ints\n",
+ "    into a 48x48x3 matrix of floats (scaled by 1/255).\n",
+ "    :param pixels: string with space separated ints\n",
+ "    :param img_size: image size\n",
+ "    :return: array of 48x48x3 matrices\n",
+ " \"\"\"\n",
+ " pixels_as_list = pandas_vector_to_list(pixels)\n",
+ "\n",
+ "    np_image_array = []\n",
+ "    for index, item in enumerate(pixels_as_list):\n",
+ "        # split space separated ints and reshape into an img_size x img_size x 3 image\n",
+ "        pixel_data = item.split()\n",
+ "        arr = np.reshape(pixel_data, (img_size, img_size, 3))\n",
+ "        np_image_array.append(arr)\n",
+ "\n",
+ " np_image_array = np.array(np_image_array)\n",
+ " # convert to float and divide by 255\n",
+ " np_image_array = np_image_array.astype('float32') / 255.0\n",
+ " return np_image_array\n",
+ "\n",
+ "\n",
+ "def get_vgg16_output(vgg16, array_input, n_feature_maps):\n",
+ "    # run the images through VGG16; pooling='avg' yields one 512-dim vector per image\n",
+ "    picture_train_features = vgg16.predict(array_input)\n",
+ "\n",
+ " feature_map = np.empty([n_feature_maps, 512])\n",
+ " for idx_pic, picture in enumerate(picture_train_features):\n",
+ " feature_map[idx_pic] = picture\n",
+ " return feature_map\n",
+ "\n",
+ "\n",
+ "def duplicate_input_layer(array_input, size):\n",
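+ "    # note: tiles a grayscale image across 3 channels; appears unused in this cell\n",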
+ " vg_input = np.empty([size, 48, 48, 3])\n",
+ " for index, item in enumerate(vg_input):\n",
+ " item[:, :, 0] = array_input[index]\n",
+ " item[:, :, 1] = array_input[index]\n",
+ " item[:, :, 2] = array_input[index]\n",
+ " return vg_input\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ " # used to get the session/graph data from keras\n",
+ " K.set_learning_phase(0)\n",
+ " \n",
+ " # get the data in a Pandas dataframe\n",
+ " raw_data = pd.read_csv(FLAGS.csv_file)\n",
+ "\n",
+ " # convert to one hot vectors\n",
+ " emotion_array = process_emotion(raw_data[['Emotion']])\n",
+ " # convert to a 48x48 float matrix\n",
+ " pixel_array = process_pixels(raw_data[['Pixel']])\n",
+ "\n",
+ " # split for test/train\n",
+ " y_train, y_test = split_for_test(emotion_array)\n",
+ " x_train_matrix, x_test_matrix = split_for_test(pixel_array)\n",
+ "\n",
+ " n_train = int(len(x_train_matrix))\n",
+ " n_test = int(len(x_test_matrix))\n",
+ "\n",
+ " x_train_input = x_train_matrix\n",
+ " x_test_input = x_test_matrix\n",
+ "\n",
+ " # vgg 16. include_top=False so the output is the 512 and use the learned weights\n",
+ " vgg16 = VGG16(include_top=False, input_shape=(48, 48, 3), pooling='avg', weights='imagenet')\n",
+ " \n",
+ "    # simple grid search: try each dropout rate 20 times and keep the best model by F1 score\n",
+ "    best_f1 = 0\n",
+ "    dropout_rates = [0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875]\n",
+ "    for rate in dropout_rates:\n",
+ "        for trial in range(0, 20):\n",
+ " \n",
+ " # get vgg16 outputs\n",
+ " x_train_feature_map = get_vgg16_output(vgg16, x_train_matrix, n_train)\n",
+ " x_test_feature_map = get_vgg16_output(vgg16, x_test_matrix, n_test)\n",
+ "\n",
+ " # build and train model\n",
+ " top_layer_model = Sequential()\n",
+ " top_layer_model.add(Dense(256, input_shape=(512,), activation='relu'))\n",
+ " top_layer_model.add(Dense(256, input_shape=(256,), activation='relu'))\n",
+ "            top_layer_model.add(Dropout(rate))\n",
+ "            top_layer_model.add(Dense(int(256 - 256 * rate), input_shape=(256,)))\n",
+ " top_layer_model.add(Dense(NUM_CLASSES, activation='softmax'))\n",
+ "\n",
+ " adamax = Adamax()\n",
+ "\n",
+ " top_layer_model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=adamax, metrics=['accuracy'])\n",
+ "\n",
+ "            # train (note: validation_data reuses the training set, so val metrics only sanity-check the fit)\n",
+ "            top_layer_model.fit(x_train_feature_map, y_train,\n",
+ "                                validation_data=(x_train_feature_map, y_train),\n",
+ "                                epochs=FLAGS.n_epochs, batch_size=FLAGS.batch_size)\n",
+ " # Evaluate\n",
+ " score = top_layer_model.evaluate(x_test_feature_map,\n",
+ " y_test, batch_size=FLAGS.batch_size)\n",
+ "\n",
+ " print(\"After top_layer_model training (test set): {}\".format(score))\n",
+ "\n",
+ " # Merge two models and create the final_model_final_final\n",
+ " inputs = Input(shape=(48, 48, 3))\n",
+ " vg_output = vgg16(inputs)\n",
+ " print(\"vg_output: {}\".format(vg_output.shape))\n",
+ " # TODO: the 'pooling' argument of the VGG16 model is important for this to work otherwise you will have to squash\n",
+ " # output from (?, 1, 1, 512) to (?, 512)\n",
+ " model_predictions = top_layer_model(vg_output)\n",
+ "            final_model = Model(inputs=inputs, outputs=model_predictions)\n",
+ " final_model.compile(loss='categorical_crossentropy',\n",
+ " optimizer=adamax, metrics=['accuracy'])\n",
+ " final_model_score = final_model.evaluate(x_train_input,\n",
+ " y_train, batch_size=FLAGS.batch_size)\n",
+ " print(\"Sanity check - final_model (train score): {}\".format(final_model_score))\n",
+ "\n",
+ " final_model_score = final_model.evaluate(x_test_input,\n",
+ " y_test, batch_size=FLAGS.batch_size)\n",
+ " print(\"Sanity check - final_model (test score): {}\".format(final_model_score))\n",
+ " # config = final_model.get_config()\n",
+ " # weights = final_model.get_weights()\n",
+ "\n",
+ " # probably don't need to create a new model\n",
+ " # model_to_save = Model.from_config(config)\n",
+ " # model_to_save.set_weights(weights)\n",
+ "            y = final_model.predict(x_test_input)\n",
+ "            true_positive = 0\n",
+ "            false_positive = 0\n",
+ "            false_negative = 0\n",
+ "            true_negative = 0\n",
+ "\n",
+ "            # compare rounded predictions with one-hot labels to build confusion counts\n",
+ "            for (pred, label) in zip(np.round(y), y_test):\n",
+ "                if pred[0] == label[0] == 1. and pred[1] == label[1] == 0.:\n",
+ "                    true_negative += 1\n",
+ "                if pred[0] == label[0] == 0. and pred[1] == label[1] == 1.:\n",
+ "                    true_positive += 1\n",
+ "                if pred[0] == label[1] == 1. and pred[1] == label[0] == 0.:\n",
+ "                    false_negative += 1\n",
+ "                if pred[0] == label[1] == 0. and pred[1] == label[0] == 1.:\n",
+ "                    false_positive += 1\n",
+ "\n",
+ "            # precision, recall and F1 (their harmonic mean), guarded against division by zero\n",
+ "            pre = true_positive / (true_positive + false_positive) if (true_positive + false_positive) else 0.0\n",
+ "            recal = true_positive / (true_positive + false_negative) if (true_positive + false_negative) else 0.0\n",
+ "            f1 = 2 * (pre * recal) / (pre + recal) if (pre + recal) else 0.0\n",
+ "            if f1 > best_f1:\n",
+ "                best_f1 = f1\n",
+ "                print(\"*************** MODEL F1-SCORE IMPROVED - MODEL SAVED ***************\")\n",
+ "                model_to_save = final_model\n",
+ "                model_to_save.save('path_to_my_model.h5')\n",
+ "                print(\"F1 score for dropout rate {}: {}\".format(rate, f1))\n",
+ "                print(\"Model input name: {}\".format(model_to_save.input))\n",
+ "                print(\"Model output name: {}\".format(model_to_save.output))\n",
+ "\n",
+ " '''\n",
+ " # Save Model\n",
+ " builder = saved_model_builder.SavedModelBuilder(FLAGS.export_path)\n",
+ " signature = predict_signature_def(inputs={'images': model_to_save.input},\n",
+ " outputs={'scores': model_to_save.output})\n",
+ " with K.get_session() as sess:\n",
+ " builder.add_meta_graph_and_variables(sess=sess,\n",
+ " tags=[tag_constants.SERVING],\n",
+ " signature_def_map={'predict': signature})\n",
+ " builder.save()\n",
+ " ''' \n",
+ " \n",
+ "if __name__ == \"__main__\":\n",
+ " main()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/fibbonaci b/fibbonaci
new file mode 100644
index 0000000..cd4b86d
--- /dev/null
+++ b/fibbonaci
@@ -0,0 +1,21 @@
+nterms = int(input("How many terms do you want? "))
+# first two terms
+n1 = 0
+n2 = 1
+count = 2
+# check if the number of terms is valid
+if nterms <= 0:
+    print("Please enter a positive integer")
+elif nterms == 1:
+    print("Fibonacci sequence:")
+    print(n1)
+else:
+    print("Fibonacci sequence:")
+    print(n1, n2, sep=', ', end=', ')
+    while count < nterms:
+        nth = n1 + n2
+        print(nth, end=', ')
+        # update values
+        n1 = n2
+        n2 = nth
+        count += 1
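+
+# A minimal reusable variant (a sketch, not part of the original script):
+# the same iteration packaged as a function returning the n-th Fibonacci number.
+def fib(n):
+    a, b = 0, 1
+    for _ in range(n):
+        a, b = b, a + b
+    return a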
diff --git a/pacman.py b/pacman.py
new file mode 100644
index 0000000..6d98922
--- /dev/null
+++ b/pacman.py
@@ -0,0 +1,159 @@
+from random import choice
+from turtle import *
+from freegames import floor, vector
+
+state = {'score': 0}
+path = Turtle(visible=False)
+writer = Turtle(visible=False)
+aim = vector(5, 0)
+pacman = vector(-40, -80)
+ghosts = [
+ [vector(-180, 160), vector(5, 0)],
+ [vector(-180, -160), vector(0, 5)],
+ [vector(100, 160), vector(0, -5)],
+ [vector(100, -160), vector(-5, 0)],
+]
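+# tile legend: 0 = wall, 1 = path with an uneaten dot, 2 = path whose dot was eaten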
+tiles = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0,
+ 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+]
+
+def square(x, y):
+    "Draw square using path at (x, y)."
+    path.up()
+    path.goto(x, y)
+    path.down()
+    path.begin_fill()
+
+    for count in range(4):
+        path.forward(20)
+        path.left(90)
+
+    path.end_fill()
+
+def offset(point):
+    "Return offset of point in tiles."
+    # map screen coordinates to a column/row in the 20x20 tile grid
+    x = (floor(point.x, 20) + 200) / 20
+    y = (180 - floor(point.y, 20)) / 20
+    index = int(x + y * 20)
+    return index
+
+def valid(point):
+    "Return True if point is valid in tiles."
+    # tile under the top-left corner of the 20px sprite
+    index = offset(point)
+
+    if tiles[index] == 0:
+        return False
+
+    # tile under the bottom-right corner
+    index = offset(point + 19)
+
+    if tiles[index] == 0:
+        return False
+
+    # only allow turns while aligned to the 20px grid
+    return point.x % 20 == 0 or point.y % 20 == 0
+
+def world():
+    "Draw world using path."
+    bgcolor('black')
+    path.color('blue')
+
+    for index in range(len(tiles)):
+        tile = tiles[index]
+
+        if tile > 0:
+            # convert the tile index back to screen coordinates
+            x = (index % 20) * 20 - 200
+            y = 180 - (index // 20) * 20
+            square(x, y)
+
+            if tile == 1:
+                path.up()
+                path.goto(x + 10, y + 10)
+                path.dot(2, 'white')
+
+def move():
+    "Move pacman and all ghosts."
+    writer.undo()
+    writer.write(state['score'])
+
+    clear()
+
+    if valid(pacman + aim):
+        pacman.move(aim)
+
+    index = offset(pacman)
+
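+    # eat the dot on the current tile, bump the score and repaint the square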
+    if tiles[index] == 1:
+        tiles[index] = 2
+        state['score'] += 1
+        x = (index % 20) * 20 - 200
+        y = 180 - (index // 20) * 20
+        square(x, y)
+
+    up()
+    goto(pacman.x + 10, pacman.y + 10)
+    dot(20, 'yellow')
+
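+    # advance each ghost; when blocked by a wall, pick a new random direction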
+    for point, course in ghosts:
+        if valid(point + course):
+            point.move(course)
+        else:
+            options = [
+                vector(5, 0),
+                vector(-5, 0),
+                vector(0, 5),
+                vector(0, -5),
+            ]
+            plan = choice(options)
+            course.x = plan.x
+            course.y = plan.y
+
+        up()
+        goto(point.x + 10, point.y + 10)
+        dot(20, 'red')
+
+    update()
+
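+    # stop scheduling new frames once any ghost catches pacman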
+    for point, course in ghosts:
+        if abs(pacman - point) < 20:
+            return
+
+    ontimer(move, 100)
+
+def change(x, y):
+    "Change pacman aim if valid."
+    if valid(pacman + vector(x, y)):
+        aim.x = x
+        aim.y = y
+
+setup(420, 420, 370, 0)
+hideturtle()
+tracer(False)
+writer.goto(160, 160)
+writer.color('white')
+writer.write(state['score'])
+listen()
+onkey(lambda: change(5, 0), 'Right')
+onkey(lambda: change(-5, 0), 'Left')
+onkey(lambda: change(0, 5), 'Up')
+onkey(lambda: change(0, -5), 'Down')
+world()
+move()
+done()
diff --git a/primepython b/primepython
new file mode 100644
index 0000000..5da8b7a
--- /dev/null
+++ b/primepython
@@ -0,0 +1,13 @@
+num = int(input("Enter a number: "))
+
+if num > 1:
+    # trial division: search for any factor of num in [2, num)
+    for i in range(2, num):
+        if (num % i) == 0:
+            print(num, "is not a prime number")
+            print(i, "times", num // i, "is", num)
+            break
+    else:
+        print(num, "is a prime number")
+
+else:
+    print(num, "is not a prime number")
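+
+# A faster variant (a sketch, not part of the original script): trial division
+# only needs to test divisors up to sqrt(num), since any factor larger than
+# sqrt(num) pairs with one smaller than it. Uses math.isqrt (Python 3.8+).
+import math
+
+def is_prime(n):
+    if n < 2:
+        return False
+    for i in range(2, math.isqrt(n) + 1):
+        if n % i == 0:
+            return False
+    return True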
diff --git a/snake.py b/snake.py
new file mode 100644
index 0000000..d324cec
--- /dev/null
+++ b/snake.py
@@ -0,0 +1,55 @@
+from turtle import *
+from random import randrange
+from freegames import square, vector
+
+food = vector(0, 0)
+snake = [vector(10, 0)]
+aim = vector(0, -10)
+
+def change(x, y):
+    "Change snake direction."
+    aim.x = x
+    aim.y = y
+
+def inside(head):
+    "Return True if head inside boundaries."
+    return -200 < head.x < 190 and -200 < head.y < 190
+
+def move():
+    "Move snake forward one segment."
+    head = snake[-1].copy()
+    head.move(aim)
+
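+    # game over when the head leaves the board or runs into the body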
+    if not inside(head) or head in snake:
+        square(head.x, head.y, 9, 'red')
+        update()
+        return
+
+    snake.append(head)
+
+    if head == food:
+        print('Snake:', len(snake))
+        # relocate the food to a random cell on the 10px grid
+        food.x = randrange(-15, 15) * 10
+        food.y = randrange(-15, 15) * 10
+    else:
+        snake.pop(0)
+
+    clear()
+
+    for body in snake:
+        square(body.x, body.y, 9, 'black')
+
+    square(food.x, food.y, 9, 'green')
+    update()
+    ontimer(move, 100)
+
+setup(420, 420, 370, 0)
+hideturtle()
+tracer(False)
+listen()
+onkey(lambda: change(10, 0), 'Right')
+onkey(lambda: change(-10, 0), 'Left')
+onkey(lambda: change(0, 10), 'Up')
+onkey(lambda: change(0, -10), 'Down')
+move()
+done()