如何创建多个输入和输出的keras函数api?

2024-10-01 09:38:36 发布

您现在位置:Python中文网/ 问答频道 /正文

该网络由编码器、lstm和解码器组成。 编码器获取一个输入图像序列(时间分布),并提取两个不同形状的输出。 一个用于lstm并将其传递给解码器,另一个直接用于解码器。 解码器接收两个输入,输出一个具有相同形状的平均图像和输入序列。 如何为这种结构编写keras函数api

(Diagram omitted — the original question included an image showing the encoder → ConvLSTM → decoder layout with the two encoder outputs.)

这是我尝试过的示例代码:

class CustomEncoder(Architecture):
    """Encoder: maps an input image to two outputs of different shapes.

    The built model returns two tensors:
        sample -- latent vector (fed to the ConvLSTM branch)
        net    -- intermediate spatial feature map (fed directly to the decoder)
    """

    def __init__(self, inputShape=(128, 128, 1), batchSize=None,
                 latentSize=1000, training=None):
        self.training = training
        # NOTE(review): the sibling Architecture subclasses pass latentSize to
        # super().__init__ as well; the original dropped it here — presumably
        # an oversight. Confirm against the Architecture base class.
        super().__init__(inputShape, batchSize, latentSize)

    def Build(self):
        inLayer = Input(self.inputShape, self.batchSize)

        # BUG FIX: Keras Conv2D takes `kernel_size`, not `kernelSize`.
        # (If Conv2D is actually a project wrapper that accepts `kernelSize`,
        # revert this rename — TODO confirm.)
        net = Conv2D(64, kernel_size=15, strides=4)(inLayer, training=self.training)
        net = Conv2D(64, kernel_size=11, strides=2)(net, training=self.training)
        net = Conv2D(128, kernel_size=7, strides=2)(net, training=self.training)

        ...  # remaining layers elided in the original question

        sample = ...  # latent sample construction elided in the original question

        # Two outputs: `sample` for the LSTM branch, `net` for the decoder skip.
        return Model(inputs=inLayer, outputs=[sample, net])



class CustomDecoder(Architecture):
    """Decoder: merges a latent vector and a spatial feature map into one image.

    Takes two inputs (latent vector from the encoder's pooling head, and an
    8x8x64 feature map) and averages the two upsampled branches into a single
    tanh-activated output image.
    """

    def __init__(self, inputShape=(128, 128, 1), batchSize=None,
                 latentSize=1000, training=None):
        self.training = training
        super().__init__(inputShape, batchSize, latentSize)

    def Build(self):
        # Input A is the latent vector (from GlobalAveragePooling);
        # input B is the spatial feature map from the ConvLSTM branch.
        inLayerA = Input([self.latentSize], self.batchSize)
        inLayerB = Input([8, 8, 64], self.batchSize)

        # Broadcast the latent vector to a spatial grid before deconvolution.
        neta = Reshape((1, 1, self.latentSize))(inLayerA)
        neta = UpSampling2D((self.inputShape[0] // 16, self.inputShape[1] // 16))(neta)

        neta = DeconvBeta(128, kernelSize=7)(neta, training=self.training)
        neta = DeconvBeta(64, kernelSize=11)(neta, training=self.training)
        neta = DeconvBeta(64, kernelSize=15, strides=4)(neta, training=self.training)

        netb = DeconvBeta(128, kernelSize=7)(inLayerB, training=self.training)
        netb = DeconvBeta(64, kernelSize=11)(netb, training=self.training)
        netb = DeconvBeta(64, kernelSize=15, strides=4)(netb, training=self.training)

        neta = Conv2DTranspose(filters=1, kernel_size=15,
                               padding='same', activation="tanh")(neta)
        netb = Conv2DTranspose(filters=1, kernel_size=15,
                               padding='same', activation="tanh")(netb)

        # BUG FIX: Average is a merge-layer class; it must be instantiated and
        # then called on the list of tensors. `Average([neta, netb])` passes the
        # tensor list as a constructor argument and never merges anything.
        net = Average()([neta, netb])
        return Model([inLayerA, inLayerB], net)

class ConvLSTMBeta(Architecture):
    """Sequence model: a single ConvLSTM2D over the encoder's feature maps.

    Input shape (None, 8, 8, 64) leaves the time dimension unconstrained;
    return_sequences=True keeps one output feature map per time step.
    """

    def __init__(self, inputShape=(None, 8, 8, 64), batchSize=None,
                 latentSize=1000, training=None):
        self.training = training
        super().__init__(inputShape, batchSize, latentSize)

    def Build(self):
        # Create the input layer for feeding the network.
        inLayer = Input(self.inputShape, self.batchSize)
        net = ConvLSTM2D(64, (3, 3), padding="same", dropout=0.2,
                         recurrent_dropout=0.2, return_sequences=True)(inLayer)
        return Model(inputs=inLayer, outputs=net)

Tags: selfnonenetinitdeftrainingstridesbatchsize