分数最大化池:我发现很难实现的深层神经网络

2024-10-02 22:38:20 发布

您现在位置:Python中文网/ 问答频道 /正文

基于Benjamin Graham的论文《分数最大池》,我尝试使用CIFAR-10数据集编写网络代码:

(160nC2)−FMP3√2)12−C2−C1−输出

但是在python解析之后:

import numpy
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
from keras.layers import Conv2D,Conv1D,LSTM
from keras.layers.core import Reshape
from keras.layers.pooling import MaxPooling1D
from Fractional_MAXPOOL import FractionalPooling2D
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
#K.tensorflow_backend.set_image_dim_ordering('tf')

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)

# load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()

X_train = X_train[0:49984]
y_train = y_train[0:49984]
X_test = X_test[0:9984]
y_test = y_test[0:9984]

# normalize inputs from 0-255 to 0.0-1.0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

# print("num_classes = " + str(num_classes))

# Create the model
model = Sequential()
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(160, (2, 2), batch_input_shape=(64, 32, 32, 3), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(FractionalPooling2D(pool_ratio=(1, 1.6, 1.6, 1),pseudo_random = True,overlap=True))
model.add(LeakyReLU(alpha = 0.3))

# Block 2
model.add(Conv2D(128, (2, 2), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(LeakyReLU(alpha = 0.3))
model.add(Dense(num_classes, activation='softmax'))

opt = keras.optimizers.Adadelta(0.1,decay=1e-4)

model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['accuracy'])
print(model.summary())

checkpoint = ModelCheckpoint('Model.hdf5', monitor='val_loss', save_best_only = True, verbose=1, mode='min')

callbacks_list = [checkpoint]
#model.load_weights('Model.hdf5')
epochs = 1000
model.fit(X_train, y_train, validation_data = [X_test,y_test], nb_epoch=epochs, batch_size=64, callbacks=callbacks_list)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))

keras/tf给了我以下错误:

ValueError:检查目标时出错:预期密集_1有4个维度,但得到了具有形状的数组(49984,10)


Tags: fromtestimportalphaaddtruemodelbatch
1条回答
网友
1楼 · 发布于 2024-10-02 22:38:20

有两个问题。首先是不展平特征(这会产生有关目标的错误),以及一个杂散的Conv1D层,它应该是Conv2D

# Block 2
model.add(Conv2D(128, (2, 2), padding='same'))
model.add(LeakyReLU(alpha = 0.3))
model.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model.add(LeakyReLU(alpha = 0.3))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

有了这些变化,模型应该能够进行训练。您还必须导入Flatten

相关问题 更多 >