Keras Official Examples

Tags (space-separated): Keras learning


  • Keras Official Examples
    • MLP for multi-class classification
    • MLP for binary classification
    • VGG-like model
    • LSTM for binary classification
    • 1D convolution for binary sequence classification
    • Stacked LSTM for multi-class classification
    • Stateful LSTM for multi-class classification

MLP for multi-class classification

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD

# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)

model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# In the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=20,
          batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
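The example is complete as written; purely as an illustrative follow-up (not part of the original), the two metrics returned by evaluate can be printed, and the softmax probabilities turned into hard class predictions:

# Follow-up sketch (not in the original example).
# Since compile() was given metrics=['accuracy'], evaluate() returns [loss, accuracy].
print('test loss: %.4f, test accuracy: %.4f' % (score[0], score[1]))
proba = model.predict(x_test, batch_size=128)  # shape (100, 10); each row sums to 1
y_pred = np.argmax(proba, axis=1)              # predicted class index per sample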

MLP for binary classification

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=20,
          batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
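Here the single sigmoid unit outputs the probability of class 1, so a threshold is needed to obtain hard labels. A minimal sketch (not part of the original):

# Sketch (not in the original): threshold the sigmoid output at 0.5.
proba = model.predict(x_test, batch_size=128)  # shape (100, 1), values in (0, 1)
y_pred = (proba > 0.5).astype('int32')         # hard 0/1 predictions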

VGG-like model

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD

# Generate dummy data
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
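One detail worth noting: unlike the MLP examples, this compile() call specifies no metrics, so evaluate() returns a single scalar loss rather than a [loss, accuracy] pair. A quick way to double-check this and the architecture (not part of the original) is:

# Sketch (not in the original): inspect the result and the layer stack.
print('test loss:', score)  # a single float, since no metrics were compiled in
model.summary()             # prints each layer's output shape and parameter count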

LSTM for binary classification

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM

# Dummy data (added here so the example runs stand-alone): integer sequences of
# word indices in [0, max_features), with one binary label per sequence.
max_features = 1024
maxlen = 80
x_train = np.random.randint(max_features, size=(1000, maxlen))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.randint(max_features, size=(100, maxlen))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Embedding(max_features, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
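The random integer sequences above only demonstrate the expected input format. On real data the same model would typically be fed padded word-index sequences; a minimal sketch using the IMDB dataset bundled with Keras (maxlen is the assumed truncation length from above):

# Hypothetical data preparation for the model above (not in the original).
from keras.datasets import imdb
from keras.preprocessing import sequence

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)  # pad/truncate to a fixed length
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)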

1D convolution for binary sequence classification

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D

# Dummy data (added here so the example runs stand-alone): sequences of
# seq_length steps, each a 100-dim vector, with one binary label per sequence.
seq_length = 64
x_train = np.random.random((1000, seq_length, 100))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, seq_length, 100))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(seq_length, 100)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
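Since both Conv1D and MaxPooling1D shorten the time dimension, it can be instructive to print the output shape of each layer (a sketch, not in the original; the numbers assume seq_length = 64 as in the dummy data):

# Sketch (not in the original): trace how the time dimension shrinks.
for layer in model.layers:
    print(layer.name, layer.output_shape)
# With seq_length = 64: 64 -> 62 -> 60 -> 20 (pooling) -> 18 -> 16,
# then GlobalAveragePooling1D collapses the 16 steps into one 128-dim vector.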

Stacked LSTM for multi-class classification

from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np

data_dim = 16
timesteps = 8
num_classes = 10

# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
               input_shape=(timesteps, data_dim)))  # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True))  # returns a sequence of vectors of dimension 32
model.add(LSTM(32))  # returns a single vector of dimension 32
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.random((1000, num_classes))

# Generate dummy validation data
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.random((100, num_classes))

model.fit(x_train, y_train,
          batch_size=64, epochs=5,
          validation_data=(x_val, y_val))
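Note that the dummy targets are just random floats of the right shape (1000, 10); they are not valid one-hot vectors. With genuine integer class labels, the usual conversion (not shown in the original) is keras.utils.to_categorical:

# For real integer labels (hypothetical; the original uses random targets):
from keras.utils import to_categorical
labels = np.random.randint(num_classes, size=(1000, 1))    # integer labels in [0, 10)
y_train = to_categorical(labels, num_classes=num_classes)  # proper one-hot rows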

Stateful LSTM for multi-class classification

A stateful recurrent model is one for which the internal states (memories) obtained after processing a batch of samples are reused as initial states for the samples of the next batch. This makes it possible to process longer sequences while keeping the computational cost of each batch manageable.

from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np

data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32

# Expected input batch shape: (batch_size, timesteps, data_dim).
# Note that we have to provide the full batch_input_shape since the network is stateful:
# the sample of index i in batch k is the follow-up for the sample i in batch k-1.
model = Sequential()
model.add(LSTM(32, return_sequences=True, stateful=True,
               batch_input_shape=(batch_size, timesteps, data_dim)))
model.add(LSTM(32, return_sequences=True, stateful=True))
model.add(LSTM(32, stateful=True))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))

# Generate dummy validation data
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random((batch_size * 3, num_classes))

model.fit(x_train, y_train,
          batch_size=batch_size, epochs=5, shuffle=False,
          validation_data=(x_val, y_val))
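Keras does not clear the states of a stateful layer on its own: the memories carry over from batch to batch, and even from epoch to epoch, until reset_states() is called explicitly. When each pass over the data should start from a blank state, a manual epoch loop is one way to do it; a minimal sketch (not in the original), as an alternative to the single fit call above:

# Sketch (not in the original): manual epoch loop with explicit state resets.
for epoch in range(5):
    model.fit(x_train, y_train, batch_size=batch_size,
              epochs=1, shuffle=False)
    model.reset_states()  # wipe the LSTM memories before the next epoch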