TF version: 2.3.0
import numpy as np
import tensorflow as tf
from copy import copy

from tf_agents.agents.reinforce import reinforce_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym, tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common

tf.compat.v1.enable_v2_behavior()

# Hyperparameters (taken from the REINFORCE tutorial).
env_name = 'CartPole-v0'
num_iterations = 1
collect_episodes_per_iteration = 2
replay_buffer_capacity = 2000
fc_layer_params = (100,)
learning_rate = 1e-3
log_interval = 5
num_eval_episodes = 10
eval_interval = 10

env = suite_gym.load(env_name)
time_step = env.reset()

train_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)

# Policy network, optimizer, and the REINFORCE agent.
actor_net = actor_distribution_network.ActorDistributionNetwork(
    train_env.observation_spec(),
    train_env.action_spec(),
    fc_layer_params=fc_layer_params)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.compat.v2.Variable(0)
tf_agent = reinforce_agent.ReinforceAgent(
    train_env.time_step_spec(),
    train_env.action_spec(),
    actor_network=actor_net,
    optimizer=optimizer,
    normalize_returns=True,
    train_step_counter=train_step_counter)
tf_agent.initialize()

eval_policy = tf_agent.policy
collect_policy = tf_agent.collect_policy

replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    data_spec=tf_agent.collect_data_spec,
    batch_size=train_env.batch_size,
    max_length=replay_buffer_capacity)

tf_agent.train = common.function(tf_agent.train)

def collect_episode(environment, policy, num_episodes):
    # Step the environment with `policy` until `num_episodes` full
    # episodes have been written into the (global) replay buffer.
    episode_counter = 0
    environment.reset()
    while episode_counter < num_episodes:
        time_step = environment.current_time_step()
        action_step = policy.action(time_step)
        next_time_step = environment.step(action_step.action)
        traj = trajectory.from_transition(time_step, action_step, next_time_step)
        replay_buffer.add_batch(traj)
        if traj.is_boundary():
            episode_counter += 1

collect_episode(train_env, tf_agent.collect_policy, 1)
experience = replay_buffer.gather_all()

for _ in range(num_iterations):
    collect_episode(train_env, tf_agent.collect_policy, collect_episodes_per_iteration)
    before = copy(tf_agent.trainable_variables)
    experience = replay_buffer.gather_all()
    train_loss = tf_agent.train(experience)
    replay_buffer.clear()
    after = copy(tf_agent.trainable_variables)
    print('before==after?', before == after)
I have been following the TF-Agents REINFORCE tutorial (https://www.tensorflow.org/agents/tutorials/6_reinforce_tutorial), and I expected that after

before = copy(tf_agent.trainable_variables)
tf_agent.train(experience)
after = copy(tf_agent.trainable_variables)

'before' should differ from 'after'. However, (before == after) always prints 'True'.
This confuses me. I thought the gradients might be zero, but that cannot be the case, because the model's loss keeps decreasing over the training steps. The gradient tape step in the reinforce_agent module also looks correctly written. I cannot find the problem: even tf_agent.policy.trainable_variables stays the same no matter how many training steps have run.
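For reference, a value-based check that cannot share state with the agent is sketched below; copying the weights out with .numpy() and comparing with np.allclose is an illustrative choice, not something from the tutorial.

# Sketch: snapshot the weights as numpy arrays, train once, and compare.
# np.allclose and its default tolerances are illustrative choices.
before = [v.numpy().copy() for v in tf_agent.trainable_variables]
tf_agent.train(experience)
after = [v.numpy().copy() for v in tf_agent.trainable_variables]
changed = any(not np.allclose(b, a) for b, a in zip(before, after))
print('weights changed after training?', changed)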
Best answer
The reason is that tf_agent.trainable_variables is a tuple of tf.Variable objects. copy only makes a shallow copy, so the entries of 'before' are the very same Variable objects that the agent updates in place during training: the snapshot moves together with the weights, and the comparison ends up comparing each variable with itself. To actually see the difference, snapshot the values instead, for example with tf.identity:
before = [tf.identity(v) for v in tf_agent.trainable_variables]
tf_agent.train(experience)
after = [tf.identity(v) for v in tf_agent.trainable_variables]

# Compare the tensor values element-wise. A bare `before == after` on lists
# of eager tensors would try to coerce non-scalar boolean tensors to bool,
# which fails; reducing each comparison to a scalar avoids that.
print(all(bool(tf.reduce_all(tf.equal(b, a))) for b, a in zip(before, after)))
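The pitfall is easy to reproduce without an agent at all. A minimal sketch (the variable v and the assign_add "update" are illustrative stand-ins for the agent's weights and a train step):

from copy import copy
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
snapshot = copy((v,))        # shallow copy: the Variable inside is the same object
v.assign_add([1.0, 1.0])     # stand-in for a training update
print(snapshot[0] is v)      # True  -- the 'snapshot' aliases the live weights
print(snapshot[0].numpy())   # [2. 3.] -- it moved together with the update
print(snapshot == (v,))      # True  -- tuple comparison short-circuits on identity

tf.identity (or .numpy()) materializes the current value into a new tensor, which is why the snapshots in the answer above do diverge after training.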
About "tensorflow - Why don't the tf_agent variables change even after training?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/65199569/