我正在使用python 3.8.8和TensorFlow 2.4开发面部表情检测模型
我的代码:
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
num_classes = 5
img_rows, img_cols = 48, 48
batch_size = 32
train_data_dir = r'D:\College Projects\SEM6_proj\Music-Recommendation-using-Facial-Expressions-master\train'
validation_data_dir = r'D:\College Projects\SEM6_proj\Music-Recommendation-using-Facial-Expressions-master\validation'
train_datagen = ImageDataGenerator( rescale= 1./255,
rotation_range= 30,
shear_range= 0.3,
zoom_range= 0.3,
width_shift_range= 0.4,
height_shift_range= 0.4,
horizontal_flip= True,
vertical_flip= True,
fill_mode= 'nearest')
validation_datagen = ImageDataGenerator(rescale= 1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
color_mode= 'grayscale',
target_size= (img_rows,img_cols),
batch_size= batch_size,
class_mode= 'categorical',
shuffle= True,
)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
color_mode= 'grayscale',
target_size= (img_rows,img_cols),
batch_size= batch_size,
class_mode= 'categorical',
shuffle= True,
)
model = Sequential()
# Block - 1
model.add(Conv2D(32,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.2))
# Block - 2
model.add(Conv2D(64,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.2))
# Block - 3
model.add(Conv2D(128,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.2))
# Block - 4
model.add(Conv2D(256,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3),padding = 'same', kernel_initializer = 'he_normal', input_shape = (img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.2))
# Block - 5
model.add(Flatten())
model.add(Dense(64, kernel_initializer= 'he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Block - 6
model.add(Dense(64, kernel_initializer= 'he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Block - 7
model.add(Dense(num_classes, kernel_initializer= 'he_normal'))
model.add(Activation('softmax'))
# print(model.summary())
from keras.optimizers import RMSprop, SGD, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
checkpoint = ModelCheckpoint('D:\College Projects\SEM6_proj\Music-Recommendation-using-Facial-Expressions-master\Emotion_little_vgg.h5',
monitor='val_loss',
mode ='min',
save_best_only= True,
verbose=1)
earlystop = EarlyStopping(monitor='val_loss',
min_delta= 0,
patience=3,
verbose=1,
restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor= 0.2,
patience= 3,
verbose=1,
min_delta= 0.0001)
callbacks = [ earlystop, checkpoint, reduce_lr ]
model.compile(loss='categorical_crossentropy',
optimizer= Adam(lr = 0.001),
metrics= ['accuracy'])
nb_train_samples = 24176
nb_validation_samples = 3006
epochs = 25
history = model.fit(
train_generator,
steps_per_epoch= nb_train_samples // batch_size,
epochs= epochs,
callbacks = callbacks,
validation_data= validation_generator,
validation_steps= nb_validation_samples // batch_size)
错误:
2021-03-08 18:01:49.114010: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2021-03-08 18:01:49.114992: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Found 28821 images belonging to 7 classes.
Found 7066 images belonging to 7 classes.
2021-03-08 18:02:09.707097: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-03-08 18:02:09.709548: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
2021-03-08 18:02:09.709777: W tensorflow/stream_executor/cuda/cuda_driver.cc:326] failed call to cuInit: UNKNOWN ERROR (303)
2021-03-08 18:02:09.715481: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: DESKTOP-G870SC4
2021-03-08 18:02:09.715834: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: DESKTOP-G870SC4
2021-03-08 18:02:09.717089: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-03-08 18:02:12.330725: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
Epoch 1/25
Traceback (most recent call last):
File "D:\College Projects\SEM6_proj\Music-Recommendation-using-Facial-Expressions-master\training.py", line 146, in <module>
history = model.fit(
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\def_function.py", line 888, in _call
return self._stateless_fn(*args, **kwds)
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\function.py", line 2942, in __call__
return graph_function._call_flat(
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\function.py", line 1918, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\function.py", line 555, in call
outputs = execute.execute(
File "D:\College Projects\SEM6_proj\my_venv2\lib\site-packages\tensorflow\python\eager\execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must be broadcastable: logits_size=[32,5] labels_size=[32,7]
[[node categorical_crossentropy/softmax_cross_entropy_with_logits (defined at \Music-Recommendation-using-Facial-Expressions-master\training.py:146) ]] [Op:__inference_train_function_3059]
Function call stack:
train_function
错误消息报告:
也就是说,你的(我猜是一个热编码的)标签有7列;这确实与上述代码生成的有关数据的打印输出一致:
所以,看起来你的
num_classes
应该是7,而不是5。更改为num_classes = 7
应清除错误作为一般规则(与您的错误无关),混合
keras
和tensorflow.keras
的层,就像您在这里所做的那样,是不好的做法,您不应该这样做;这些实际上是不同的包,所以选择一个,并从它的所有层-不要混合他们相关问题 更多 >
编程相关推荐