From ef5ac9ba792d3347c14a938ab7a60d93dbefa5b3 Mon Sep 17 00:00:00 2001
From: Jason Bunk
Date: Fri, 15 Jul 2016 11:45:11 -0700
Subject: [PATCH] This fixes issue #1 by updating Example to latest Keras API
 (tested with Theano backend)

---
 Example/callbacks.py                 | 11 +++++++++--
 Example/sentiment_lstm_regression.py |  5 +++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/Example/callbacks.py b/Example/callbacks.py
index 32381d5..22f551f 100644
--- a/Example/callbacks.py
+++ b/Example/callbacks.py
@@ -3,6 +3,12 @@
 from keras import backend as K
 from keras import models
 
+def standardize_X(X):
+    if type(X) == list:
+        return X
+    else:
+        return [X]
+
 class ModelTest(Callback):
     ''' Test model at the end of every X epochs.
 
@@ -61,9 +67,10 @@ def predict_stochastic(self, X, batch_size=128, verbose=0):
         - [Dropout: A simple way to prevent neural networks from overfitting](http://jmlr.org/papers/v15/srivastava14a.html)
         - [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](http://arxiv.org/abs/1506.02142)
         '''
-        X = models.standardize_X(X)
+        X = standardize_X(X)
         if self._predict_stochastic is None: # we only get self.model after init
-            self._predict_stochastic = K.function([self.model.X_test], [self.model.y_train])
+            self._predict_stochastic = K.function([self.model.inputs[0]], [self.model.outputs[0]],
+                givens={K.learning_phase(): np.uint8(1)})
         return self.model._predict_loop(self._predict_stochastic, X, batch_size, verbose)[0]
 
 
diff --git a/Example/sentiment_lstm_regression.py b/Example/sentiment_lstm_regression.py
index 83642cf..b677733 100644
--- a/Example/sentiment_lstm_regression.py
+++ b/Example/sentiment_lstm_regression.py
@@ -49,6 +49,11 @@
 X_train, X_test, Y_train, Y_test = dataset.X_train, dataset.X_test, dataset.Y_train, dataset.Y_test
 mean_y_train, std_y_train = dataset.mean_y_train, dataset.std_y_train
+X_train = np.asarray(X_train)
+X_test = np.asarray(X_test)
+Y_train = np.asarray(Y_train)
+Y_test = np.asarray(Y_test)
+
 
 # Set seed:
 np.random.seed(global_seed)