python - Tensorboard: cannot find a named scope

Tags: python python-3.x tensorflow deep-learning tensorboard

I have a scope that I named 'Pred/Accuracy' and I can't seem to find it in TensorBoard. I will include my whole code further down, but in particular here is my definition of the cost function:

def compute_cost(z, Y, parameters, l2_reg=False):

    with tf.name_scope('cost'):
        logits = tf.transpose(z)
        labels = tf.transpose(Y)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                      labels=labels))
        if l2_reg == True:

            reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            cost = cost + tf.reduce_sum(reg)

    with tf.name_scope('Pred/Accuracy'):

        prediction = tf.argmax(z)
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    return cost, prediction, accuracy

But on TensorBoard I can't see it, even if I click into the cost block:

(screenshot: TensorBoard graph)

Below is essentially my entire code, excluding the imports and the data preprocessing:

# Create X and Y placeholders

def create_xy_placeholder(n_x, n_y):
    X = tf.placeholder(tf.float32, shape = [n_x, None], name = 'X')
    Y = tf.placeholder(tf.float32, shape = [n_y, None], name = 'Y')

    return X, Y


# initialize parameters for the hidden layers

def initialize_parameters(n_x, scale, hidden_units):

    hidden_units= [n_x] + hidden_units
    parameters = {}
    regularizer = tf.contrib.layers.l2_regularizer(scale)

    for i in range(0, len(hidden_units[1:])):
        with tf.variable_scope('hidden_parameters_'+str(i+1)):
            w = tf.get_variable("W"+str(i+1), [hidden_units[i+1], hidden_units[i]], 
                                    initializer=tf.contrib.layers.xavier_initializer(),
                                    regularizer=regularizer)

            b = tf.get_variable("b"+str(i+1), [hidden_units[i+1], 1], 
                                    initializer = tf.constant_initializer(0.1))

            parameters.update({"W"+str(i+1): w})
            parameters.update({"b"+str(i+1): b})

    return parameters


# forward propagation with batch norm and dropout

def forward_propagation(X, parameters, batch_norm=False, keep_prob=1):

    a_new = X   

    for i in range(0, int(len(parameters)/2)-1):

        with tf.name_scope('forward_pass_'+str(i+1)):

            w = parameters['W'+str(i+1)]
            b = parameters['b'+str(i+1)]

            z = tf.matmul(w, a_new) + b

            if batch_norm == True:
                z = tf.layers.batch_normalization(z, momentum=0.99, axis=0)

            a = tf.nn.relu(z)

            if keep_prob < 1:
                a = tf.nn.dropout(a, keep_prob)  

            a_new = a

            tf.summary.histogram('act_'+str(i+1), a_new)

    # calculating final Z before input into cost as logit 

    with tf.name_scope('forward_pass_'+str(int(len(parameters)/2))):
        w = parameters['W'+str(int(len(parameters)/2))]
        b = parameters['b'+str(int(len(parameters)/2))]

        z = tf.matmul(w, a_new) + b

        if batch_norm == True:
                z = tf.layers.batch_normalization(z, momentum=0.99, axis=0)

    return z

# compute cost with option for l2 regularization

def compute_cost(z, Y, parameters, l2_reg=False):

    with tf.name_scope('cost'):
        logits = tf.transpose(z)
        labels = tf.transpose(Y)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, 
                                                                      labels = labels))
        if l2_reg == True:

            reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            cost = cost + tf.reduce_sum(reg)

    with tf.name_scope('Pred/Accuracy'):

        prediction=tf.argmax(z)
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    return cost, prediction, accuracy


# defining the model (need to add keep_prob for dropout)

def model(X_train, Y_train, X_test, Y_test, 
          hidden_units=[30, 50, 50, 30, 4],            # hidden units/layers
          learning_rate = 0.0001,                       # Learning rate
          num_epochs = 2000, minibatch_size = 30,       # minibatch/ number epochs
          keep_prob=0.5,                                # dropout
          batch_norm=True,                              # batch normalization
          l2_reg=True, scale = 0.01,                    # L2 regularization/scale is lambda
          print_cost = True):

    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost

    logs_path = '/tmp/tensorflow_logs/example/'

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_xy_placeholder(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters(n_x, scale, hidden_units)

    # Forward propagation: Build the forward propagation in the tensorflow graph
    z = forward_propagation(X, parameters, batch_norm=batch_norm, keep_prob=keep_prob)

    # Cost function: Add cost function to tensorflow graph
    cost, prediction, accuracy = compute_cost(z, Y, parameters, l2_reg)

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    with tf.name_scope('optimizer'):

        optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)

        # Op to calculate every variable gradient
        grads = tf.gradients(cost, tf.trainable_variables())
        grads = list(zip(grads, tf.trainable_variables()))
        # Op to update all variables according to their gradient
        apply_grads = optimizer.apply_gradients(grads_and_vars = grads)    


    # Initialize all the variables
    init = tf.global_variables_initializer()

    # to view in tensorboard
    tf.summary.scalar('loss', cost)
    tf.summary.scalar('accuracy', accuracy)

    # Create summaries to visualize weights
    for var in tf.trainable_variables():
        tf.summary.histogram(var.name, var)
    # Summarize all gradients
    for grad, var in grads:
        tf.summary.histogram(var.name + '/gradient', grad)

    merged_summary_op = tf.summary.merge_all()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Start the session to compute the tensorflow graph
    with tf.Session(config=config) as sess:
        # Run the initialization
        sess.run(init)

        # define writer
        summary_writer = tf.summary.FileWriter(logs_path, 
                                               graph=tf.get_default_graph())

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                       # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            count = 0

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost"; the feed_dict should contain a minibatch of (X, Y).

                _ , minibatch_cost, summary = sess.run([apply_grads, cost, 
                                                        merged_summary_op], 
                                              feed_dict = {X: minibatch_X, Y: minibatch_Y})

                epoch_cost += minibatch_cost / num_minibatches

                # Write logs at every iteration
                summary_writer.add_summary(summary, epoch * num_minibatches + count)

                count += 1

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
                prediction1=tf.argmax(z)
#                print('Z5: ', Z5.eval(feed_dict={X: minibatch_X, Y: minibatch_Y}))
                print('prediction: ', prediction1.eval(feed_dict={X: minibatch_X, 
                                                                  Y: minibatch_Y}))

                correct1=tf.argmax(Y)
#                print('Y: ', Y.eval(feed_dict={X: minibatch_X, 
#                                                            Y: minibatch_Y}))
                print('correct: ', correct1.eval(feed_dict={X: minibatch_X, 
                                                            Y: minibatch_Y}))

            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))

        # Calculate accuracy on the test set

        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        print("Run the command line:\n" \
          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
          "\nThen open http://0.0.0.0:6006/ into your web browser")



        return parameters


# train the model and evaluate on the test data


parameters = model(x_train, y_train, x_test, y_test, keep_prob=1)

Best Answer

TensorFlow scopes are hierarchical: you can have a scope inside another scope inside another scope, and so on. The name "Pred/Accuracy" means exactly that: you have a top-level "Pred" scope with an "Accuracy" scope nested inside it (the slash has a special meaning in naming).
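
A minimal sketch of this behaviour (assuming TensorFlow 1.x, as in the question; the placeholder name and shape are made up purely for illustration):

import tensorflow as tf

tf.reset_default_graph()

# hypothetical input, only so the scope has something to contain
x = tf.placeholder(tf.float32, shape=[3, None], name='x')

with tf.name_scope('Pred/Accuracy'):
    pred = tf.argmax(x)

print(pred.op.name)  # -> "Pred/Accuracy/ArgMax"
# TensorBoard's graph view groups ops by the slashes in their names, so this op
# is drawn inside a top-level "Pred" node that contains a nested "Accuracy" node.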

By default, TensorBoard shows only the top-level scopes: "Pred" (at the top), "batch_normalization", and so on. You can expand them with a double-click to see what is inside. Inside "Pred" you should find "Accuracy".

If you prefer, just name your scope differently, e.g. "Pred_Accuracy", and the full name will appear in TensorBoard.
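
A self-contained sketch of that renamed variant (again assuming TensorFlow 1.x; the placeholders stand in for the question's z and Y, and their shapes are invented):

import tensorflow as tf

tf.reset_default_graph()

# hypothetical stand-ins for the logits z and the labels Y
z = tf.placeholder(tf.float32, shape=[4, None], name='z')
Y = tf.placeholder(tf.float32, shape=[4, None], name='Y')

with tf.name_scope('Pred_Accuracy'):
    prediction = tf.argmax(z)
    correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

print(accuracy.op.name)  # e.g. "Pred_Accuracy/Mean" -- one flat top-level node

Because there is no slash in the name, the whole block appears as a single "Pred_Accuracy" node at the top level of the TensorBoard graph.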

This question about python - Tensorboard: cannot find a named scope comes from a similar question on Stack Overflow: https://stackoverflow.com/questions/50082220/
