# Which parameter decides the input tensor size?

In my CNN, I don't define the input tensor size anywhere, so why does the network have six input tensors, as shown in TensorBoard (cf. picture)?

Also, in the TensorBoard histograms for `conv1_5`, `conv2_5`, `conv4_5`, `conv5_5`, `conv7_5`, `conv8_5`, ..., only the last column's weight and bias tensors change from step to step, while the parameters of the other tensors stay constant (frozen at their step-0 data).

def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (spatial size is preserved)."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')

def max_pool_2x2(x):
    """2x2 max pooling with stride 2 — halves each spatial dimension (SAME padding)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')

def weight_variable(shape, alias):
    """Create a trainable weight variable initialised from a truncated normal.

    A histogram summary named '<current scope>/weights' is registered so the
    values appear in TensorBoard.

    Args:
        shape: Shape of the weight tensor.
        alias: Name given to the initializer op (not the variable itself).

    Returns:
        The created `tf.Variable`.
    """
    init = tf.truncated_normal(shape, stddev=0.1, name=alias)
    weights = tf.Variable(init)
    scope = tf.contrib.framework.get_name_scope()
    tf.summary.histogram(scope + '/weights', weights)
    return weights

def bias_variable(shape, alias):
    """Create a trainable bias variable initialised to 0.1.

    A histogram summary named '<current scope>/bias' is registered so the
    values appear in TensorBoard.

    Args:
        shape: Shape of the bias tensor.
        alias: Name given to the constant initializer op.

    Returns:
        The created (and summarized) `tf.Variable`.
    """
    initial = tf.constant(0.1, shape=shape, name=alias)
    var = tf.Variable(initial)
    name_scope = tf.contrib.framework.get_name_scope()
    tf.summary.histogram(name_scope + '/bias', var)
    # BUG FIX: the original returned `tf.Variable(initial)` — a *second*,
    # freshly created variable. The variable attached to the histogram summary
    # above was therefore never the one used (and trained) in the graph, so
    # its TensorBoard histogram never changed, and every call leaked an extra
    # unused variable into the graph. Return the summarized variable instead.
    return var

def cnn(x):
    """Build the convolutional network graph.

    Architecture: 2 conv layers -> pool -> 2 conv layers -> pool ->
    2 conv layers -> flatten -> FC(1024) -> dropout -> FC(10 logits).

    Args:
        x: Input tensor; reshaped internally to [n, 28, 28, 3].

    Returns:
        A tuple (y, dropout_prob) where `y` is the [n, 10] logits tensor and
        `dropout_prob` is the keep-probability placeholder for the dropout layer.
    """
    # [n, 28, 28, 3]
    with tf.name_scope('reshape1'):
        x_image = tf.reshape(x, [-1, 28, 28, 3])

    # [n, 28, 28, 32]
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([3, 3, 3, 32], 'weight')
        b_conv1 = bias_variable([32], 'bias')
        layer_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # [n, 28, 28, 32]
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([3, 3, 32, 32], 'weight')
        b_conv2 = bias_variable([32], 'bias')
        layer_conv2 = tf.nn.relu(conv2d(layer_conv1, W_conv2) + b_conv2)

    # [n, 14, 14, 32]
    with tf.name_scope('pool3'):
        layer_pool3 = max_pool_2x2(layer_conv2)

    # [n, 14, 14, 64]
    with tf.name_scope('conv4'):
        W_conv4 = weight_variable([3, 3, 32, 64], 'weight')
        b_conv4 = bias_variable([64], 'bias')
        layer_conv4 = tf.nn.relu(conv2d(layer_pool3, W_conv4) + b_conv4)

    # [n, 14, 14, 64]
    with tf.name_scope('conv5'):
        W_conv5 = weight_variable([3, 3, 64, 64], 'weight')
        b_conv5 = bias_variable([64], 'bias')
        layer_conv5 = tf.nn.relu(conv2d(layer_conv4, W_conv5) + b_conv5)

    # [n, 7, 7, 64]
    with tf.name_scope('pool6'):
        layer_pool6 = max_pool_2x2(layer_conv5)

    # [n, 7, 7, 128]
    # (removed a stray debug `print(layer_pool6.shape)` that ran at graph-build time)
    with tf.name_scope('conv7'):
        W_conv7 = weight_variable([3, 3, 64, 128], 'weight')
        b_conv7 = bias_variable([128], 'bias')
        layer_conv7 = tf.nn.relu(conv2d(layer_pool6, W_conv7) + b_conv7)

    # [n, 7, 7, 128]
    with tf.name_scope('conv8'):
        W_conv8 = weight_variable([3, 3, 128, 128], 'weight')
        b_conv8 = bias_variable([128], 'bias')
        layer_conv8 = tf.nn.relu(conv2d(layer_conv7, W_conv8) + b_conv8)

    # [n, 7 * 7 * 128] — flatten for the fully-connected layers
    with tf.name_scope('reshape9'):
        layer_reshape9 = tf.reshape(layer_conv8, [-1, 7 * 7 * 128])

    # [n, 1024]  (fixed comment: the original said [n, 7, 7, 128] here)
    with tf.name_scope('fc10'):
        W_fc10 = weight_variable([7 * 7 * 128, 1024], 'weight')
        b_fc10 = bias_variable([1024], 'bias')
        layer_fc10 = tf.nn.relu(tf.matmul(layer_reshape9, W_fc10) + b_fc10)

    # [n, 1024] — dropout_prob is the *keep* probability fed at run time
    with tf.name_scope('dropout11'):
        dropout_prob = tf.placeholder(tf.float32)
        layer_dropout11 = tf.nn.dropout(layer_fc10, dropout_prob, name='prob')

    # [n, 10] — raw logits; softmax is applied by the loss/accuracy consumers
    with tf.name_scope('fc12'):
        W_fc12 = weight_variable([1024, 10], 'weight')
        b_fc12 = bias_variable([10], 'bias')
        y = tf.matmul(layer_dropout11, W_fc12) + b_fc12

    return y, dropout_prob

def next_batch_set(images, labels, batch_size=128):
    """Sample a random training batch (uniform, with replacement).

    Args:
        images: A 4-D array of training images.
        labels: A 1-D array of image classes, parallel to `images`.
        batch_size: Number of examples to draw.

    Return:
        batch_images: A batch of images.
        batch_labels: The matching batch of labels.
    """
    picks = np.random.choice(len(images), batch_size)
    return images[picks], labels[picks]

# Graph inputs: NHWC images with 3 channels; labels are sparse class indices.
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 3], name='inputs')
y_ = tf.placeholder(tf.int32, shape=[None], name='labels')

# NOTE(review): `preprocess` is defined outside this chunk. Rebinding `x` means
# later feed_dicts keyed on `x` feed the *preprocessed* tensor, not the
# placeholder — confirm that is intended.
x = preprocess(x)

predict_labels, dropout_prob = cnn(x)

# Sparse cross-entropy: labels are class ids, not one-hot vectors.
with tf.name_scope('loss'):
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=predict_labels, labels=y_))

with tf.name_scope('accuracy'):
    logits = tf.nn.softmax(predict_labels)
    # Predicted class = argmax over the 10 logits, cast to int32 to match y_.
    classes = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32, name='predict')
    classes_ = tf.identity(classes, name='classes')
    accuracy = tf.reduce_mean(tf.cast(tf.equal(classes, y_), 'float'))

# Fetch conv1's first weight variable by its auto-generated name
# ('Variable' because weight_variable does not name the tf.Variable itself).
graph = tf.get_default_graph()
conv1_var = graph.get_tensor_by_name('conv1/Variable:0')

saver = tf.train.Saver()

# Merge every summary (the per-layer weight/bias histograms) into one op.
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Choose the directory where the TensorBoard logs are stored.
    writer = tf.summary.FileWriter("./logs/",sess.graph)

    for i in range(8000):
        batch_images, batch_labels = next_batch_set(train_images, train_targets)
        # dropout_prob feeds the keep probability; 1 disables dropout here.
        train_dict = {x: batch_images, y_: batch_labels, dropout_prob: 1}
        #train_dict = {x: batch_images, y_: batch_labels}
        # NOTE(review): `train_step` is not defined anywhere in this view —
        # presumably an optimizer minimize op created elsewhere; confirm.
        sess.run(train_step, feed_dict=train_dict)

        if i % 100 == 0:
            loss_, acc_ , classes_= sess.run([cross_entropy, accuracy, classes], feed_dict=train_dict)
            train_text = 'step: {}, loss: {}, acc: {}, class: {}, labels: {}'.format(i, loss_, acc_, classes_, batch_labels)
            print(train_text)
            # NOTE(review): `result` is never passed to writer.add_summary(result, i)
            # in the visible code — without that call the summaries never reach
            # TensorBoard past step 0; confirm whether the call exists below
            # this chunk.
            result = sess.run(merged, feed_dict=train_dict)