How to get the value of a tensor in TensorFlow

Posted 2024-10-04 03:19:18


I am training a CNN on medical data (images), and I need to retrieve the tensor values of the last layer in order to perform further computations.

def _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
    inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # shape=(?, image_z, image_width, image_height, image_channel)
    # Vnet model
    # layer1->convolution
    layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                               scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                               scope='layer1')
    layer1 = resnet_Add(x1=layer0, x2=layer1)
    # down sampling1
    down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1')
    # layer2->convolution
    layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_2')
    layer2 = resnet_Add(x1=down1, x2=layer2)
    # down sampling2
    down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2')
    # layer3->convolution
    layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_2')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_3')
    layer3 = resnet_Add(x1=down2, x2=layer3)
    # down sampling3
    down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3')
    # layer4->convolution
    layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_2')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_3')
    layer4 = resnet_Add(x1=down3, x2=layer4)
    # down sampling4
    down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4')
    # layer5->convolution (bottleneck)
    layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_2')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_3')
    layer5 = resnet_Add(x1=down4, x2=layer5)

    # deconvolution1
    deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
    # layer6->convolution
    layer6 = crop_and_concat(layer4, deconv1)
    _, Z, H, W, _ = layer4.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)
    # deconvolution2
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
    # layer7->convolution
    layer7 = crop_and_concat(layer3, deconv2)
    _, Z, H, W, _ = layer3.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_2')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_3')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)
    # deconvolution3
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
    # layer8->convolution
    layer8 = crop_and_concat(layer2, deconv3)
    _, Z, H, W, _ = layer2.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_2')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_3')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)
    # deconvolution4
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
    # layer9->convolution
    layer9 = crop_and_concat(layer1, deconv4)
    _, Z, H, W, _ = layer1.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_2')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_3')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)
    # output layer: 1x1x1 convolution + sigmoid
    output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 32, n_class), scope='output')
    y = tf.shape(output_map)  # note: tf.shape returns a tensor, not the shape values themselves

    # Failing attempt: output_map depends on the placeholders X, phase and drop,
    # and the graph's variables are not initialized yet, so eval() raises an error.
    sess = tf.InteractiveSession()
    print(output_map.eval())

    # Second attempt, which fails with the same error (initialize_all_variables
    # is the deprecated predecessor of global_variables_initializer):
    # with tf.Session() as s:
    #     tf.initialize_all_variables().run()
    #     xx = tf.rank(output_map)
    #     print('rank_output_map is ', s.run(xx))

    return output_map

I tried to get the tensor's value in two ways:

  1. tensor.eval()
  2. sess.run(tensor)

but both produce the same error. Any help would be appreciated. (An error screenshot was attached to the original post.)
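
For reference, the two calls are equivalent: tensor.eval() is shorthand for running the tensor in the default session, so both fail in the same way when variables are uninitialized or the placeholders the tensor depends on are not fed. A minimal sketch of what both methods need, assuming the InteractiveSession/eval lines inside the function are removed, that X, phase and drop are the model's placeholders, and that batch is a hypothetical input array of shape [n, image_z, image_width, image_height, image_channel]:

output_map = _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # assumed meanings: phase = is_training flag, drop = dropout keep probability
    feed = {X: batch, phase: False, drop: 1.0}
    v1 = sess.run(output_map, feed_dict=feed)             # method 2
    v2 = output_map.eval(session=sess, feed_dict=feed)    # method 1, same numpy result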


Tags: image, width, drop, relu, scope, height, bn, conv
1 Answer
User · #1 · Posted 2024-10-04 03:19:18

You just need to run sess.run on the tensor to get its value, but first you need a handle to that tensor. You can name it when you build the model by adding the name argument (this works for any tensor), for example:

Layer_name = tf.add(tf.multiply(Flat, W1), b1, name="Layer_name")
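
If a tensor was not given an explicit name, TensorFlow auto-names it after its op and scope, and you can list the graph's operations to discover that name. A small sketch; 'output' here matches the scope passed to conv_sigmod in the question's code:

for op in tf.get_default_graph().get_operations():
    if op.name.startswith('output'):
        print(op.name, [t.get_shape() for t in op.outputs])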

Later on, you can fetch that layer's tensor by name and evaluate it:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # or restore trained weights first
    Layer_name = tf.get_default_graph().get_tensor_by_name('Layer_name:0')
    FC1_values = sess.run(Layer_name, feed_dict={x: input_img_arr})
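
In the question's code this is even simpler, because _create_conv_net already returns output_map, so no name lookup is needed; and after training you would normally restore the trained weights with tf.train.Saver rather than re-initializing. A sketch under those assumptions ('model.ckpt' is a hypothetical checkpoint path, and the feed values for phase and drop are assumptions about their meaning):

output_map = _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'model.ckpt')  # hypothetical checkpoint path
    values = sess.run(output_map, feed_dict={X: batch, phase: False, drop: 1.0})
    print(values.shape, values.min(), values.max())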
