# Keras CNN training script for the RapidMiner Execute Python operator
from __future__ import print_function
import keras
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from datetime import datetime
import numpy as np
import pandas as pd
import json

def rm_main(train_data, test_data):

    ### Set training defaults, could be parametrised
    batch_size = 128
    epochs = 12

    ### Get data for training and testing
    x_train = train_data['x']
    y_train = train_data['y']
    x_test = test_data['x']
    y_test = test_data['y']
    img_rows = train_data['rows']
    img_cols = train_data['cols']
    input_shape = train_data['shape']
    num_classes = len(y_train[0])

    print('Training shape:', input_shape)
    print(x_train.shape[0], 'training samples')
    print(x_test.shape[0], 'test samples')
    print(num_classes, 'classes')

    ### Create a model
    mpath = '%{mpath}'
    print('Loading model from: ' + mpath)
    if not mpath.isspace() and mpath != '':
        # Load an existing model from the path given by the RapidMiner macro
        model = load_model(mpath)
        model_name = mpath
    else:
        # Create a new model architecture
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3),
                         activation='relu',
                         input_shape=input_shape))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # Train the model and retain epoch evaluation
    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=2,
                        validation_data=(x_test, y_test),
                        callbacks=[TensorBoard(log_dir='/tmp/keras_logs')])

    # Save the model, its architecture (in JSON) and its weights,
    # using one timestamp so all three files share the same name
    random_name = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_name = "/tmp/keras_model_" + random_name + ".h5"
    model.save(model_name)
    model.save_weights("/tmp/keras_weights_" + random_name + ".h5")
    with open("/tmp/keras_model_" + random_name + ".json", 'w') as f:
        json.dump(model.to_json(), f, ensure_ascii=False)

    ### Evaluate the trained model
    # Evaluate the model on training data and test data
    train_score = model.evaluate(x_train, y_train, verbose=0)
    valid_score = model.evaluate(x_test, y_test, verbose=0)

    ### Prepare results
    # Return the model location
    model_spec = pd.DataFrame.from_dict({'Keras_Model': [model_name]})
    # Prepare evaluation results
    eval_data = np.array([
        ['', 'acc', 'loss', 'val_acc', 'val_loss'],
        ['', round(train_score[1], 4), round(train_score[0], 4),
         round(valid_score[1], 4), round(valid_score[0], 4)]])
    eval_result = pd.DataFrame(data=eval_data[1:, 1:],
                               index=eval_data[1:, 0],
                               columns=eval_data[0, 1:])
    # Prepare evaluation history
    if not mpath.isspace() and mpath != '':
        df_history = eval_result
    else:
        df_history = pd.DataFrame.from_dict(history.history)

    return model_spec, df_history, eval_result
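For completeness, here is a minimal sketch of how the saved model could be consumed from a second Execute Python operator for scoring. It assumes the model_spec table produced above is wired into that operator and that the scoring input mirrors the train_data/test_data structure with an 'x' entry; the names scoring_data and the 'prediction' column are hypothetical.

# Hypothetical scoring-side script: reads the model path from model_spec,
# reloads the trained Keras model and returns predicted class indices
import numpy as np
import pandas as pd
from keras.models import load_model

def rm_main(model_spec, scoring_data):
    # The training script stored the HDF5 path in the 'Keras_Model' column
    model_path = model_spec['Keras_Model'][0]
    model = load_model(model_path)

    # Predict class probabilities and keep the most likely class per sample
    x_new = scoring_data['x']
    probabilities = model.predict(x_new)
    predictions = np.argmax(probabilities, axis=1)

    return pd.DataFrame({'prediction': predictions})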
This is the one that bugged me with CNNs in the new Deep Learning extension, and I asked @pschlunder in earlier threads to look into it; he might be working on it (not sure). Thread here: https://community.rapidminer.com/discussion/54816/regarding-input-shape-of-data-into-cnn-deep-learning-extension#latest
For the current process, my interest is in a Keras example that serializes a model, as I see many users asking Keras-related questions. Even though those questions concern the Keras extension, which is not currently maintained, we can point users to your Python Scripting example if they specifically want to use the Keras library instead of DL4J (the new Deep Learning extension); a rough serialization sketch is below. But anyway, @sgenzer might have other ideas for the community samples.
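As a starting point for such a sample, here is a minimal, self-contained sketch of the two usual Keras serialization routes: saving the full model to a single HDF5 file versus saving the architecture as JSON plus the weights separately. The tiny model and the /tmp paths are only illustrative.

# Route 1: one HDF5 file holds architecture, weights and optimizer state
# Route 2: architecture (JSON) and weights are stored separately and the
#          model must be rebuilt and re-compiled before use
from keras.models import Sequential, load_model, model_from_json
from keras.layers import Dense

model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Route 1: full-model save and restore
model.save('/tmp/example_model.h5')
restored = load_model('/tmp/example_model.h5')

# Route 2: architecture + weights, then rebuild
architecture = model.to_json()
model.save_weights('/tmp/example_weights.h5')
rebuilt = model_from_json(architecture)
rebuilt.load_weights('/tmp/example_weights.h5')
rebuilt.compile(loss='categorical_crossentropy', optimizer='adam')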
Thanks