ReinFlow committed on
Commit 5a437ce · verified · 1 Parent(s): 8179451

Upload 291 files


First commit: upload the datasets for the OpenAI Gym tasks, processed from D4RL. Also upload all pre-trained checkpoints, including 1-ReFlow, Shortcut Policy, and DDPM, trained on the OpenAI Gym tasks, Franka Kitchen, and Robomimic visual manipulation tasks.
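As a quick sanity check, the offline archives can be opened with NumPy once the Git LFS objects have been pulled. The sketch below is illustrative only; the array keys mentioned in the comments (obs_min / obs_max) are assumptions, not keys confirmed by this commit.

import numpy as np

# Paths come from this commit; run `git lfs pull` first so the .npz files are real archives.
train = np.load("data-offline/gym_d4rl/hopper-medium-v2/train.npz")
norm = np.load("data-offline/gym_d4rl/hopper-medium-v2/normalization.npz")

# List the actual array names before assuming any particular key layout.
print("train.npz arrays:", train.files)
print("normalization.npz arrays:", norm.files)

# Hypothetical usage, assuming min/max observation statistics are stored:
# obs_normalized = (obs - norm["obs_min"]) / (norm["obs_max"] - norm["obs_min"] + 1e-6)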

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete list.
Files changed (50)
  1. data-offline/gym_custom/ant-medium-expert-v0/normalization.npz +3 -0
  2. data-offline/gym_custom/ant-medium-expert-v0/train.npz +3 -0
  3. data-offline/gym_d4rl/Humanoid-medium-v3/normalization.npz +3 -0
  4. data-offline/gym_d4rl/Humanoid-medium-v3/train.npz +3 -0
  5. data-offline/gym_d4rl/ant-medium-expert-v2/normalization.npz +3 -0
  6. data-offline/gym_d4rl/ant-medium-expert-v2/train.npz +3 -0
  7. data-offline/gym_d4rl/hopper-medium-v2/normalization.npz +3 -0
  8. data-offline/gym_d4rl/hopper-medium-v2/train.npz +3 -0
  9. data-offline/gym_d4rl/walker-medium-v2/normalization.npz +3 -0
  10. data-offline/gym_d4rl/walker-medium-v2/train.npz +3 -0
  11. data-offline/gym_d4rl_normalization/Humanoid-medium-v3/normalization.npz +3 -0
  12. data-offline/gym_d4rl_normalization/ant-medium-expert-v2/normalization.npz +3 -0
  13. data-offline/gym_d4rl_normalization/hopper-medium-v2/normalization.npz +3 -0
  14. data-offline/gym_d4rl_normalization/walker-medium-v2/normalization.npz +3 -0
  15. log/log_gym_d4rl_pretrained/Humanoid-medium-v3/1-ReFlow/state_100.pt +3 -0
  16. log/log_gym_d4rl_pretrained/Humanoid-medium-v3/DDPM/state_120.pt +3 -0
  17. log/log_gym_d4rl_pretrained/Humanoid-medium-v3/Shortcut/state_60.pt +3 -0
  18. log/log_gym_d4rl_pretrained/ant-medium-expert-v2/1-ReFlow/state_50.pt +3 -0
  19. log/log_gym_d4rl_pretrained/ant-medium-expert-v2/DDPM/state_20.pt +3 -0
  20. log/log_gym_d4rl_pretrained/ant-medium-expert-v2/Shortcut/state_50.pt +3 -0
  21. log/log_gym_d4rl_pretrained/hopper-medium-v2/1-ReFlow/state_40.pt +3 -0
  22. log/log_gym_d4rl_pretrained/hopper-medium-v2/DDPM/state_120.pt +3 -0
  23. log/log_gym_d4rl_pretrained/hopper-medium-v2/Shortcut/state_40.pt +3 -0
  24. log/log_gym_d4rl_pretrained/walker2d-medium-v2/1-ReFlow/state_80.pt +3 -0
  25. log/log_gym_d4rl_pretrained/walker2d-medium-v2/DDPM/state_60.pt +3 -0
  26. log/log_gym_d4rl_pretrained/walker2d-medium-v2/Shortcut/state_40.pt +3 -0
  27. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/config.yaml +129 -0
  28. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/hydra.yaml +166 -0
  29. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/overrides.yaml +9 -0
  30. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/architecture.log +76 -0
  31. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/checkpoint/best.pt +3 -0
  32. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/checkpoint/state_999.pt +3 -0
  33. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/explore_noise.png +3 -0
  34. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/result.pkl +3 -0
  35. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/run.log +0 -0
  36. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/test_lr_schedulers.png +3 -0
  37. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/config.yaml +129 -0
  38. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/hydra.yaml +166 -0
  39. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/overrides.yaml +9 -0
  40. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/architecture.log +76 -0
  41. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/checkpoint/best.pt +3 -0
  42. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/checkpoint/state_999.pt +3 -0
  43. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/explore_noise.png +3 -0
  44. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/result.pkl +3 -0
  45. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/run.log +0 -0
  46. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/test_lr_schedulers.png +3 -0
  47. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/config.yaml +129 -0
  48. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/hydra.yaml +165 -0
  49. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/overrides.yaml +8 -0
  50. log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/architecture.log +76 -0
data-offline/gym_custom/ant-medium-expert-v0/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1796c4c4e0a34bbbc9710636bbbd7ec2a22d2189affed7f72ea46994d03d4e2e
+ size 1970
data-offline/gym_custom/ant-medium-expert-v0/train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d29bdcaf5f17ba311173b5206e1e23b534d125b0fb87098e722a66cf2fdb7cc3
+ size 180538900
data-offline/gym_d4rl/Humanoid-medium-v3/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1c6fdf99af3550dd259d893fdf497ca880f05b7570f08b2a65d554224ff6f3
+ size 4162
data-offline/gym_d4rl/Humanoid-medium-v3/train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e15906b8b3802639a7c6568706585b0dcf53db148e6ec2748536777ec9fb3a11
+ size 393493669
data-offline/gym_d4rl/ant-medium-expert-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc91a243893ee4d6d6ca562335c8c79c379b0f4f8d1ce81f572650ef84ad5b5e
+ size 1970
data-offline/gym_d4rl/ant-medium-expert-v2/train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:152c1139db6a779f92460960e28b6e319e7ee90e68ae8081ec2a2fb79bf00413
+ size 171507921
data-offline/gym_d4rl/hopper-medium-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d05b2943bc39772f7f770dfc0a4df2a9f205cb1e05ae74b585cd0dd382a0742e
+ size 1130
data-offline/gym_d4rl/hopper-medium-v2/train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5ae5aad98481c5fcb7a1de25536ea907a205f65f8344dc1b02e67db6957c71e
+ size 37208613
data-offline/gym_d4rl/walker-medium-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d103b793b591081947fca807ae13c2752bd6964621374101f5a89a19346ae16
+ size 1202
data-offline/gym_d4rl/walker-medium-v2/train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f32074e11902432f3847146890c09e8854200000103f05b9398dc38cff4f864
+ size 86163981
data-offline/gym_d4rl_normalization/Humanoid-medium-v3/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1c6fdf99af3550dd259d893fdf497ca880f05b7570f08b2a65d554224ff6f3
+ size 4162
data-offline/gym_d4rl_normalization/ant-medium-expert-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc91a243893ee4d6d6ca562335c8c79c379b0f4f8d1ce81f572650ef84ad5b5e
+ size 1970
data-offline/gym_d4rl_normalization/hopper-medium-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d05b2943bc39772f7f770dfc0a4df2a9f205cb1e05ae74b585cd0dd382a0742e
+ size 1130
data-offline/gym_d4rl_normalization/walker-medium-v2/normalization.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d103b793b591081947fca807ae13c2752bd6964621374101f5a89a19346ae16
+ size 1202
log/log_gym_d4rl_pretrained/Humanoid-medium-v3/1-ReFlow/state_100.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaf3a7eff02f5a0fa8a53b78ae40fc300462e18a4203e7b125712af2b98b24cb
+ size 12776994
log/log_gym_d4rl_pretrained/Humanoid-medium-v3/DDPM/state_120.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:198a3343d89a78a5c0b5c62f89b49e28b64cedec90802f2b52095ae91fab5d8c
+ size 12776994
log/log_gym_d4rl_pretrained/Humanoid-medium-v3/Shortcut/state_60.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bd42a8043c8ab2b5d9d9c6309107c577a8ffeb78f014d720ab802e5485041c3
+ size 10101818
log/log_gym_d4rl_pretrained/ant-medium-expert-v2/1-ReFlow/state_50.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe57f55927f3f2b10d861d9d40288f8c0396cfaafde47709019a287f779b9926
+ size 10015714
log/log_gym_d4rl_pretrained/ant-medium-expert-v2/DDPM/state_20.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19a4d8374eb29758156c044b439ca92fa55af3ef652aa2c6828d3b12989611a7
+ size 10015714
log/log_gym_d4rl_pretrained/ant-medium-expert-v2/Shortcut/state_50.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a590e50ded998af5ea15870ababc050408d98a0cd7374e211c5cf470b60977a6
+ size 9240122
log/log_gym_d4rl_pretrained/hopper-medium-v2/1-ReFlow/state_40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39808c2deab2e0ba275127d1b10d71098ecd747ca32f56284bd461ec6b0caccf
+ size 8865250
log/log_gym_d4rl_pretrained/hopper-medium-v2/DDPM/state_120.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb512ba75abf13c61a703617b1dbe05e6ac402ad07d09b6eddb9de545f75a807
+ size 8865250
log/log_gym_d4rl_pretrained/hopper-medium-v2/Shortcut/state_40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:202a3ded34bcf5a9f592513ab8bc2765f2189986932cad6fa4c183a72ed26516
+ size 8805354
log/log_gym_d4rl_pretrained/walker2d-medium-v2/1-ReFlow/state_80.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28e2c0230245ce5aed46806df614e278f39845aea68b9146169661ac020b0768
+ size 9111266
log/log_gym_d4rl_pretrained/walker2d-medium-v2/DDPM/state_60.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fed53ceaf5fc10d3e51b64e4c083e7cc7577490d39d6d14f4c8f40ad3de17b8d
+ size 9111266
log/log_gym_d4rl_pretrained/walker2d-medium-v2/Shortcut/state_40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7204a4d80367467e8ce3bbdfc81922a5eee92f3dabbc2d13b939e9aaddf6539
+ size 9008362
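Each binary above is stored as a Git LFS pointer: the three-line version / oid sha256 / size stub shown in its diff. Below is a minimal sketch (assuming a local clone with git-lfs installed) of fetching one payload and checking it against the pointer's hash.

import hashlib
import subprocess

path = "data-offline/gym_d4rl/hopper-medium-v2/normalization.npz"
expected = "d05b2943bc39772f7f770dfc0a4df2a9f205cb1e05ae74b585cd0dd382a0742e"  # oid from the pointer above

# Fetch just this object through Git LFS.
subprocess.run(["git", "lfs", "pull", "--include", path], check=True)

# Hash the downloaded file and compare it with the sha256 oid recorded in the pointer.
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == expected, f"checksum mismatch: {digest}"
print("verified:", path)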
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/config.yaml ADDED
@@ -0,0 +1,129 @@
+ env_name: ant-medium-expert-v0
+ action_dim: 8
+ horizon_steps: 4
+ act_steps: 4
+ obs_dim: 111
+ cond_steps: 1
+ _target_: agent.finetune.mine.train_ppo_flow_agent.TrainPPOFlowAgent
+ name: ${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}
+ logdir: ${oc.env:DPPO_LOG_DIR}/gym/gym-finetune/${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}_tdf${ft_denoising_steps}/${now:%Y-%m-%d}_${now:%H-%M-%S}_seed${seed}
+ base_policy_path: ${oc.env:DPPO_LOG_DIR}/gym/gym-pretrain/ant-medium-expert-v0_pre_reflow_mlp_ta4_td20/2025-03-29_19-23-57_42/checkpoint/best_769.pt
+ normalization_path: ${oc.env:DPPO_DATA_DIR}/gym/${env_name}/normalization.npz
+ seed: 0
+ device: cuda:0
+ denoising_steps: 4
+ ft_denoising_steps: 4
+ env:
+ n_envs: 40
+ name: ${env_name}
+ max_episode_steps: 1000
+ reset_at_iteration: false
+ save_video: false
+ best_reward_threshold_for_success: 3
+ wrappers:
+ mujoco_locomotion_lowdim:
+ normalization_path: ${normalization_path}
+ multi_step:
+ n_obs_steps: ${cond_steps}
+ n_action_steps: ${act_steps}
+ max_episode_steps: ${env.max_episode_steps}
+ reset_within_step: true
+ wandb:
+ entity: ${oc.env:DPPO_WANDB_ENTITY}
+ project: gym-${env_name}-finetune
+ run: ${now:%Y-%m-%d}_${now:%H-%M-%S}_${name}
+ offline_mode: false
+ train:
+ n_train_itr: 1000
+ n_critic_warmup_itr: 0
+ n_steps: 500
+ gamma: 0.99
+ actor_lr: 4.5e-05
+ actor_weight_decay: 0
+ actor_lr_scheduler:
+ type: cosine
+ first_cycle_steps: 100
+ warmup_steps: 10
+ min_lr: 2.0e-05
+ critic_lr: 0.00065
+ critic_weight_decay: 1.0e-05
+ critic_lr_scheduler:
+ type: cosine
+ first_cycle_steps: 100
+ warmup_steps: 10
+ min_lr: 0.0003
+ save_model_freq: 300
+ val_freq: 10
+ render:
+ freq: 1
+ num: 0
+ reward_scale_running: true
+ reward_scale_const: 1.0
+ gae_lambda: 0.95
+ batch_size: 50000
+ update_epochs: 5
+ vf_coef: 0.5
+ ent_coef: 0.03
+ target_kl: 1.0
+ lr_schedule: fixed
+ repeat_samples: true
+ verbose: false
+ clip_intermediate_actions: true
+ account_for_initial_stochasticity: true
+ model:
+ _target_: model.flow.ft_ppo.ppoflow.PPOFlow
+ device: ${device}
+ policy:
+ _target_: model.flow.mlp_flow.FlowMLP
+ horizon_steps: ${horizon_steps}
+ action_dim: ${action_dim}
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
+ time_dim: 16
+ mlp_dims:
+ - 512
+ - 512
+ - 512
+ activation_type: ReLU
+ out_activation_type: Identity
+ use_layernorm: false
+ residual_style: true
+ critic:
+ _target_: model.common.critic.CriticObs
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
+ mlp_dims:
+ - 256
+ - 256
+ - 256
+ activation_type: Mish
+ residual_style: true
+ actor_policy_path: ${base_policy_path}
+ act_dim: ${action_dim}
+ horizon_steps: ${horizon_steps}
+ act_min: -1
+ act_max: 1
+ obs_dim: ${obs_dim}
+ cond_steps: ${cond_steps}
+ noise_scheduler_type: learn_decay
+ inference_steps: ${denoising_steps}
+ ft_denoising_steps: ${ft_denoising_steps}
+ randn_clip_value: 3
+ min_sampling_denoising_std: 0.08
+ min_logprob_denoising_std: 0.08
+ max_logprob_denoising_std: 0.16
+ logprob_min: -1.0
+ logprob_max: 1.0
+ clip_ploss_coef: 0.01
+ clip_ploss_coef_base: 0.01
+ clip_ploss_coef_rate: 3
+ clip_vloss_coef: null
+ denoised_clip_value: 1.0
+ time_dim_explore: 0
+ learn_explore_time_embedding: false
+ use_time_independent_noise: false
+ init_time_embedding: true
+ noise_hidden_dims:
+ - 64
+ - 64
+ logprob_debug_sample: false
+ logprob_debug_recalculate: false
+ explore_net_activation_type: Tanh
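The _target_ entries in this config are Hydra instantiation hooks: hydra.utils.instantiate resolves each dotted path to a class and passes the sibling keys as constructor arguments, which is how PPOFlow, its FlowMLP policy, and the CriticObs critic are built. A minimal sketch of that mechanism follows, assuming the repo's modules are importable and that an eval resolver is registered roughly as shown; the real entry point is the repo's run script, not this snippet.

from hydra.utils import instantiate
from omegaconf import OmegaConf

# The config uses a custom ${eval:...} resolver; the repo presumably registers something like this.
OmegaConf.register_new_resolver("eval", eval, replace=True)

# Load the saved run config (path from this commit); DPPO_* environment variables must be set
# for the ${oc.env:...} interpolations to resolve.
cfg = OmegaConf.load(
    "log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/config.yaml"
)

# Builds PPOFlow with its FlowMLP policy and CriticObs critic from the _target_ keys.
model = instantiate(cfg.model)
print(type(model))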
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/hydra.yaml ADDED
@@ -0,0 +1,166 @@
1
+ hydra:
2
+ run:
3
+ dir: ${logdir}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
6
+ subdir: ${hydra.job.num}
7
+ launcher:
8
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
9
+ sweeper:
10
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
11
+ max_batch_size: null
12
+ params: null
13
+ help:
14
+ app_name: ${hydra.job.name}
15
+ header: '${hydra.help.app_name} is powered by Hydra.
16
+
17
+ '
18
+ footer: 'Powered by Hydra (https://hydra.cc)
19
+
20
+ Use --hydra-help to view Hydra specific help
21
+
22
+ '
23
+ template: '${hydra.help.header}
24
+
25
+ == Configuration groups ==
26
+
27
+ Compose your configuration from those groups (group=option)
28
+
29
+
30
+ $APP_CONFIG_GROUPS
31
+
32
+
33
+ == Config ==
34
+
35
+ Override anything in the config (foo.bar=value)
36
+
37
+
38
+ $CONFIG
39
+
40
+
41
+ ${hydra.help.footer}
42
+
43
+ '
44
+ hydra_help:
45
+ template: 'Hydra (${hydra.runtime.version})
46
+
47
+ See https://hydra.cc for more info.
48
+
49
+
50
+ == Flags ==
51
+
52
+ $FLAGS_HELP
53
+
54
+
55
+ == Configuration groups ==
56
+
57
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
58
+ to command line)
59
+
60
+
61
+ $HYDRA_CONFIG_GROUPS
62
+
63
+
64
+ Use ''--cfg hydra'' to Show the Hydra config.
65
+
66
+ '
67
+ hydra_help: ???
68
+ hydra_logging:
69
+ version: 1
70
+ formatters:
71
+ simple:
72
+ format: '[%(asctime)s][HYDRA] %(message)s'
73
+ handlers:
74
+ console:
75
+ class: logging.StreamHandler
76
+ formatter: simple
77
+ stream: ext://sys.stdout
78
+ root:
79
+ level: INFO
80
+ handlers:
81
+ - console
82
+ loggers:
83
+ logging_example:
84
+ level: DEBUG
85
+ disable_existing_loggers: false
86
+ job_logging:
87
+ version: 1
88
+ formatters:
89
+ simple:
90
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
91
+ handlers:
92
+ console:
93
+ class: logging.StreamHandler
94
+ formatter: simple
95
+ stream: ext://sys.stdout
96
+ file:
97
+ class: logging.FileHandler
98
+ formatter: simple
99
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
100
+ root:
101
+ level: INFO
102
+ handlers:
103
+ - console
104
+ - file
105
+ disable_existing_loggers: false
106
+ env: {}
107
+ mode: RUN
108
+ searchpath: []
109
+ callbacks: {}
110
+ output_subdir: .hydra
111
+ overrides:
112
+ hydra:
113
+ - hydra.mode=RUN
114
+ task:
115
+ - device=cuda:0
116
+ - model.use_time_independent_noise=False
117
+ - model.noise_hidden_dims=[64,64]
118
+ - model.min_sampling_denoising_std=0.08
119
+ - model.min_logprob_denoising_std=0.08
120
+ - model.max_logprob_denoising_std=0.16
121
+ - train.ent_coef=0.03
122
+ - model.noise_scheduler_type=learn_decay
123
+ - seed=0
124
+ job:
125
+ name: run
126
+ chdir: null
127
+ override_dirname: device=cuda:0,model.max_logprob_denoising_std=0.16,model.min_logprob_denoising_std=0.08,model.min_sampling_denoising_std=0.08,model.noise_hidden_dims=[64,64],model.noise_scheduler_type=learn_decay,model.use_time_independent_noise=False,seed=0,train.ent_coef=0.03
128
+ id: ???
129
+ num: ???
130
+ config_name: ft_ppo_reflow_mlp_learn
131
+ env_set: {}
132
+ env_copy: []
133
+ config:
134
+ override_dirname:
135
+ kv_sep: '='
136
+ item_sep: ','
137
+ exclude_keys: []
138
+ runtime:
139
+ version: 1.3.2
140
+ version_base: '1.3'
141
+ cwd: /home/zhangtonghe/dppo
142
+ config_sources:
143
+ - path: hydra.conf
144
+ schema: pkg
145
+ provider: hydra
146
+ - path: /home/zhangtonghe/dppo/cfg
147
+ schema: file
148
+ provider: main
149
+ - path: /home/zhangtonghe/dppo/cfg/gym/finetune/ant-medium-expert-v0
150
+ schema: file
151
+ provider: command-line
152
+ - path: ''
153
+ schema: structured
154
+ provider: schema
155
+ output_dir: /home/zhangtonghe/dppo/log/gym/gym-finetune/ant-medium-expert-v0_ppo_reflow_mlp_ta4_td4_tdf4/2025-04-20_20-30-16_seed0
156
+ choices:
157
+ hydra/env: default
158
+ hydra/callbacks: null
159
+ hydra/job_logging: default
160
+ hydra/hydra_logging: default
161
+ hydra/hydra_help: default
162
+ hydra/help: default
163
+ hydra/sweeper: basic
164
+ hydra/launcher: basic
165
+ hydra/output: default
166
+ verbose: false
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/.hydra/overrides.yaml ADDED
@@ -0,0 +1,9 @@
+ - device=cuda:0
+ - model.use_time_independent_noise=False
+ - model.noise_hidden_dims=[64,64]
+ - model.min_sampling_denoising_std=0.08
+ - model.min_logprob_denoising_std=0.08
+ - model.max_logprob_denoising_std=0.16
+ - train.ent_coef=0.03
+ - model.noise_scheduler_type=learn_decay
+ - seed=0
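These are the command-line overrides Hydra recorded for this run; combined with config_name: ft_ppo_reflow_mlp_learn from hydra.yaml, the composed config can be reproduced offline. A minimal sketch using Hydra's compose API; the config directory is the one listed under config_sources in hydra.yaml and should be adjusted to your own clone, and composition may still require the repo's full config tree on the search path.

from hydra import compose, initialize_config_dir

# Absolute path taken from the recorded hydra.yaml; point it at your local checkout instead.
with initialize_config_dir(
    config_dir="/home/zhangtonghe/dppo/cfg/gym/finetune/ant-medium-expert-v0",
    version_base="1.3",
):
    cfg = compose(
        config_name="ft_ppo_reflow_mlp_learn",
        overrides=[
            "device=cuda:0",
            "model.use_time_independent_noise=False",
            "model.noise_hidden_dims=[64,64]",
            "model.min_sampling_denoising_std=0.08",
            "model.min_logprob_denoising_std=0.08",
            "model.max_logprob_denoising_std=0.16",
            "train.ent_coef=0.03",
            "model.noise_scheduler_type=learn_decay",
            "seed=0",
        ],
    )
print(cfg.train.ent_coef, cfg.seed)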
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/architecture.log ADDED
@@ -0,0 +1,76 @@
+ self.model=PPOFlow(
+ (actor_old): FlowMLP(
+ (time_embedding): Sequential(
+ (0): SinusoidalPosEmb()
+ (1): Linear(in_features=16, out_features=32, bias=True)
+ (2): Mish()
+ (3): Linear(in_features=32, out_features=16, bias=True)
+ )
+ (mlp_mean): ResidualMLP(
+ (layers): ModuleList(
+ (0): Linear(in_features=159, out_features=512, bias=True)
+ (1): TwoLayerPreActivationResNetLinear(
+ (l1): Linear(in_features=512, out_features=512, bias=True)
+ (l2): Linear(in_features=512, out_features=512, bias=True)
+ (act): ReLU()
+ )
+ (2): Linear(in_features=512, out_features=32, bias=True)
+ (3): Identity()
+ )
+ )
+ )
+ (actor_ft): NoisyFlowMLP(
+ (policy): FlowMLP(
+ (time_embedding): Sequential(
+ (0): SinusoidalPosEmb()
+ (1): Linear(in_features=16, out_features=32, bias=True)
+ (2): Mish()
+ (3): Linear(in_features=32, out_features=16, bias=True)
+ )
+ (mlp_mean): ResidualMLP(
+ (layers): ModuleList(
+ (0): Linear(in_features=159, out_features=512, bias=True)
+ (1): TwoLayerPreActivationResNetLinear(
+ (l1): Linear(in_features=512, out_features=512, bias=True)
+ (l2): Linear(in_features=512, out_features=512, bias=True)
+ (act): ReLU()
+ )
+ (2): Linear(in_features=512, out_features=32, bias=True)
+ (3): Identity()
+ )
+ )
+ )
+ (explore_noise_net): ExploreNoiseNet(
+ (mlp_logvar): MLP(
+ (moduleList): ModuleList(
+ (0): Sequential(
+ (linear_1): Linear(in_features=127, out_features=64, bias=True)
+ (act_1): Tanh()
+ )
+ (1): Sequential(
+ (linear_1): Linear(in_features=64, out_features=64, bias=True)
+ (act_1): Tanh()
+ )
+ (2): Sequential(
+ (linear_1): Linear(in_features=64, out_features=32, bias=True)
+ (act_1): Identity()
+ )
+ )
+ )
+ )
+ )
+ (critic): CriticObs(
+ (Q1): ResidualMLP(
+ (layers): ModuleList(
+ (0): Linear(in_features=111, out_features=256, bias=True)
+ (1): TwoLayerPreActivationResNetLinear(
+ (l1): Linear(in_features=256, out_features=256, bias=True)
+ (l2): Linear(in_features=256, out_features=256, bias=True)
+ (act): Mish()
+ )
+ (2): Linear(in_features=256, out_features=1, bias=True)
+ (3): Identity()
+ )
+ )
+ )
+ )
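architecture.log above shows both actors and the critic built from TwoLayerPreActivationResNetLinear blocks inside a ResidualMLP. A rough, self-contained illustration of such a pre-activation residual block with the printed actor shapes (159 -> 512 -> 32); this is a sketch, not the repo's exact implementation.

import torch
import torch.nn as nn

class TwoLayerPreActivationResBlock(nn.Module):
    # Pre-activation residual block: x + l2(act(l1(act(x)))), as suggested by the log.
    def __init__(self, dim, act=None):
        super().__init__()
        self.l1 = nn.Linear(dim, dim)
        self.l2 = nn.Linear(dim, dim)
        self.act = act or nn.ReLU()

    def forward(self, x):
        return x + self.l2(self.act(self.l1(self.act(x))))

# 159 = 111 obs dims + 8 action dims * 4 horizon steps + 16 time-embedding dims.
actor_mean = nn.Sequential(
    nn.Linear(159, 512),
    TwoLayerPreActivationResBlock(512),
    nn.Linear(512, 32),  # 32 = action_dim * horizon_steps
)
print(actor_mean(torch.randn(2, 159)).shape)  # torch.Size([2, 32])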
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/checkpoint/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c9e47be0089acf12e3c654b3e08cddd1acc3de311b8a0816724341c562a8392
+ size 12129240
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/checkpoint/state_999.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12b211f06a3356b8b43262635bd2ff2944554c8e504b5865996663cf13e74077
+ size 12135418
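The .pt files (best.pt, state_999.pt) are PyTorch-serialized checkpoints. A minimal sketch of inspecting one follows; the layout of the loaded object is an assumption to verify, not something documented by this commit.

import torch

ckpt_path = "log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/checkpoint/best.pt"

# Load onto the CPU and look at the top-level structure before assuming a particular layout.
ckpt = torch.load(ckpt_path, map_location="cpu")
if isinstance(ckpt, dict):
    print("checkpoint keys:", list(ckpt.keys()))
else:
    print("checkpoint object:", type(ckpt))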
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/explore_noise.png ADDED

Git LFS Details

  • SHA256: ab72cb38e0ba0b343f600f3d3b9bc60d8302165b241f7dad1092b53066ed6bf6
  • Pointer size: 130 Bytes
  • Size of remote file: 20.3 kB
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/result.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f93a4b41b51f4d13867f3a73a9f3ac3470a10fb8ec7d3fd2838dcba665d0fa
+ size 845615
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/run.log ADDED
The diff for this file is too large to render. See raw diff
 
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed0/2025-04-20_20-30-16_seed0/test_lr_schedulers.png ADDED

Git LFS Details

  • SHA256: d9e78cf85658d6dbff47d67da24506f522cd01012569d2b5bd82ce5645ac149b
  • Pointer size: 130 Bytes
  • Size of remote file: 40.7 kB
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/config.yaml ADDED
@@ -0,0 +1,129 @@
1
+ env_name: ant-medium-expert-v0
2
+ action_dim: 8
3
+ horizon_steps: 4
4
+ act_steps: 4
5
+ obs_dim: 111
6
+ cond_steps: 1
7
+ _target_: agent.finetune.mine.train_ppo_flow_agent.TrainPPOFlowAgent
8
+ name: ${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}
9
+ logdir: ${oc.env:DPPO_LOG_DIR}/gym/gym-finetune/${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}_tdf${ft_denoising_steps}/${now:%Y-%m-%d}_${now:%H-%M-%S}_seed${seed}
10
+ base_policy_path: ${oc.env:DPPO_LOG_DIR}/gym/gym-pretrain/ant-medium-expert-v0_pre_reflow_mlp_ta4_td20/2025-03-29_19-23-57_42/checkpoint/best_769.pt
11
+ normalization_path: ${oc.env:DPPO_DATA_DIR}/gym/${env_name}/normalization.npz
12
+ seed: 42
13
+ device: cuda:0
14
+ denoising_steps: 4
15
+ ft_denoising_steps: 4
16
+ env:
17
+ n_envs: 40
18
+ name: ${env_name}
19
+ max_episode_steps: 1000
20
+ reset_at_iteration: false
21
+ save_video: false
22
+ best_reward_threshold_for_success: 3
23
+ wrappers:
24
+ mujoco_locomotion_lowdim:
25
+ normalization_path: ${normalization_path}
26
+ multi_step:
27
+ n_obs_steps: ${cond_steps}
28
+ n_action_steps: ${act_steps}
29
+ max_episode_steps: ${env.max_episode_steps}
30
+ reset_within_step: true
31
+ wandb:
32
+ entity: ${oc.env:DPPO_WANDB_ENTITY}
33
+ project: gym-${env_name}-finetune
34
+ run: ${now:%Y-%m-%d}_${now:%H-%M-%S}_${name}
35
+ offline_mode: false
36
+ train:
37
+ n_train_itr: 1000
38
+ n_critic_warmup_itr: 0
39
+ n_steps: 500
40
+ gamma: 0.99
41
+ actor_lr: 4.5e-05
42
+ actor_weight_decay: 0
43
+ actor_lr_scheduler:
44
+ type: cosine
45
+ first_cycle_steps: 100
46
+ warmup_steps: 10
47
+ min_lr: 2.0e-05
48
+ critic_lr: 0.00065
49
+ critic_weight_decay: 1.0e-05
50
+ critic_lr_scheduler:
51
+ type: cosine
52
+ first_cycle_steps: 100
53
+ warmup_steps: 10
54
+ min_lr: 0.0003
55
+ save_model_freq: 300
56
+ val_freq: 10
57
+ render:
58
+ freq: 1
59
+ num: 0
60
+ reward_scale_running: true
61
+ reward_scale_const: 1.0
62
+ gae_lambda: 0.95
63
+ batch_size: 50000
64
+ update_epochs: 5
65
+ vf_coef: 0.5
66
+ ent_coef: 0.03
67
+ target_kl: 1.0
68
+ lr_schedule: fixed
69
+ repeat_samples: true
70
+ verbose: false
71
+ clip_intermediate_actions: true
72
+ account_for_initial_stochasticity: true
73
+ model:
74
+ _target_: model.flow.ft_ppo.ppoflow.PPOFlow
75
+ device: ${device}
76
+ policy:
77
+ _target_: model.flow.mlp_flow.FlowMLP
78
+ horizon_steps: ${horizon_steps}
79
+ action_dim: ${action_dim}
80
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
81
+ time_dim: 16
82
+ mlp_dims:
83
+ - 512
84
+ - 512
85
+ - 512
86
+ activation_type: ReLU
87
+ out_activation_type: Identity
88
+ use_layernorm: false
89
+ residual_style: true
90
+ critic:
91
+ _target_: model.common.critic.CriticObs
92
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
93
+ mlp_dims:
94
+ - 256
95
+ - 256
96
+ - 256
97
+ activation_type: Mish
98
+ residual_style: true
99
+ actor_policy_path: ${base_policy_path}
100
+ act_dim: ${action_dim}
101
+ horizon_steps: ${horizon_steps}
102
+ act_min: -1
103
+ act_max: 1
104
+ obs_dim: ${obs_dim}
105
+ cond_steps: ${cond_steps}
106
+ noise_scheduler_type: learn_decay
107
+ inference_steps: ${denoising_steps}
108
+ ft_denoising_steps: ${ft_denoising_steps}
109
+ randn_clip_value: 3
110
+ min_sampling_denoising_std: 0.08
111
+ min_logprob_denoising_std: 0.08
112
+ max_logprob_denoising_std: 0.16
113
+ logprob_min: -1.0
114
+ logprob_max: 1.0
115
+ clip_ploss_coef: 0.01
116
+ clip_ploss_coef_base: 0.01
117
+ clip_ploss_coef_rate: 3
118
+ clip_vloss_coef: null
119
+ denoised_clip_value: 1.0
120
+ time_dim_explore: 0
121
+ learn_explore_time_embedding: false
122
+ use_time_independent_noise: false
123
+ init_time_embedding: true
124
+ noise_hidden_dims:
125
+ - 64
126
+ - 64
127
+ logprob_debug_sample: false
128
+ logprob_debug_recalculate: false
129
+ explore_net_activation_type: Tanh
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/hydra.yaml ADDED
@@ -0,0 +1,166 @@
1
+ hydra:
2
+ run:
3
+ dir: ${logdir}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
6
+ subdir: ${hydra.job.num}
7
+ launcher:
8
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
9
+ sweeper:
10
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
11
+ max_batch_size: null
12
+ params: null
13
+ help:
14
+ app_name: ${hydra.job.name}
15
+ header: '${hydra.help.app_name} is powered by Hydra.
16
+
17
+ '
18
+ footer: 'Powered by Hydra (https://hydra.cc)
19
+
20
+ Use --hydra-help to view Hydra specific help
21
+
22
+ '
23
+ template: '${hydra.help.header}
24
+
25
+ == Configuration groups ==
26
+
27
+ Compose your configuration from those groups (group=option)
28
+
29
+
30
+ $APP_CONFIG_GROUPS
31
+
32
+
33
+ == Config ==
34
+
35
+ Override anything in the config (foo.bar=value)
36
+
37
+
38
+ $CONFIG
39
+
40
+
41
+ ${hydra.help.footer}
42
+
43
+ '
44
+ hydra_help:
45
+ template: 'Hydra (${hydra.runtime.version})
46
+
47
+ See https://hydra.cc for more info.
48
+
49
+
50
+ == Flags ==
51
+
52
+ $FLAGS_HELP
53
+
54
+
55
+ == Configuration groups ==
56
+
57
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
58
+ to command line)
59
+
60
+
61
+ $HYDRA_CONFIG_GROUPS
62
+
63
+
64
+ Use ''--cfg hydra'' to Show the Hydra config.
65
+
66
+ '
67
+ hydra_help: ???
68
+ hydra_logging:
69
+ version: 1
70
+ formatters:
71
+ simple:
72
+ format: '[%(asctime)s][HYDRA] %(message)s'
73
+ handlers:
74
+ console:
75
+ class: logging.StreamHandler
76
+ formatter: simple
77
+ stream: ext://sys.stdout
78
+ root:
79
+ level: INFO
80
+ handlers:
81
+ - console
82
+ loggers:
83
+ logging_example:
84
+ level: DEBUG
85
+ disable_existing_loggers: false
86
+ job_logging:
87
+ version: 1
88
+ formatters:
89
+ simple:
90
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
91
+ handlers:
92
+ console:
93
+ class: logging.StreamHandler
94
+ formatter: simple
95
+ stream: ext://sys.stdout
96
+ file:
97
+ class: logging.FileHandler
98
+ formatter: simple
99
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
100
+ root:
101
+ level: INFO
102
+ handlers:
103
+ - console
104
+ - file
105
+ disable_existing_loggers: false
106
+ env: {}
107
+ mode: RUN
108
+ searchpath: []
109
+ callbacks: {}
110
+ output_subdir: .hydra
111
+ overrides:
112
+ hydra:
113
+ - hydra.mode=RUN
114
+ task:
115
+ - device=cuda:1
116
+ - model.use_time_independent_noise=False
117
+ - model.noise_hidden_dims=[64,64]
118
+ - model.min_sampling_denoising_std=0.08
119
+ - model.min_logprob_denoising_std=0.08
120
+ - model.max_logprob_denoising_std=0.16
121
+ - train.ent_coef=0.03
122
+ - model.noise_scheduler_type=learn_decay
123
+ - seed=3407
124
+ job:
125
+ name: run
126
+ chdir: null
127
+ override_dirname: device=cuda:1,model.max_logprob_denoising_std=0.16,model.min_logprob_denoising_std=0.08,model.min_sampling_denoising_std=0.08,model.noise_hidden_dims=[64,64],model.noise_scheduler_type=learn_decay,model.use_time_independent_noise=False,seed=3407,train.ent_coef=0.03
128
+ id: ???
129
+ num: ???
130
+ config_name: ft_ppo_reflow_mlp_learn
131
+ env_set: {}
132
+ env_copy: []
133
+ config:
134
+ override_dirname:
135
+ kv_sep: '='
136
+ item_sep: ','
137
+ exclude_keys: []
138
+ runtime:
139
+ version: 1.3.2
140
+ version_base: '1.3'
141
+ cwd: /home/zhangtonghe/dppo
142
+ config_sources:
143
+ - path: hydra.conf
144
+ schema: pkg
145
+ provider: hydra
146
+ - path: /home/zhangtonghe/dppo/cfg
147
+ schema: file
148
+ provider: main
149
+ - path: /home/zhangtonghe/dppo/cfg/gym/finetune/ant-medium-expert-v0
150
+ schema: file
151
+ provider: command-line
152
+ - path: ''
153
+ schema: structured
154
+ provider: schema
155
+ output_dir: /home/zhangtonghe/dppo/log/gym/gym-finetune/ant-medium-expert-v0_ppo_reflow_mlp_ta4_td4_tdf4/2025-04-20_20-30-25_seed3407
156
+ choices:
157
+ hydra/env: default
158
+ hydra/callbacks: null
159
+ hydra/job_logging: default
160
+ hydra/hydra_logging: default
161
+ hydra/hydra_help: default
162
+ hydra/help: default
163
+ hydra/sweeper: basic
164
+ hydra/launcher: basic
165
+ hydra/output: default
166
+ verbose: false
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/.hydra/overrides.yaml ADDED
@@ -0,0 +1,9 @@
+ - device=cuda:1
+ - model.use_time_independent_noise=False
+ - model.noise_hidden_dims=[64,64]
+ - model.min_sampling_denoising_std=0.08
+ - model.min_logprob_denoising_std=0.08
+ - model.max_logprob_denoising_std=0.16
+ - train.ent_coef=0.03
+ - model.noise_scheduler_type=learn_decay
+ - seed=3407
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/architecture.log ADDED
@@ -0,0 +1,76 @@
1
+ self.model=PPOFlow(
2
+ (actor_old): FlowMLP(
3
+ (time_embedding): Sequential(
4
+ (0): SinusoidalPosEmb()
5
+ (1): Linear(in_features=16, out_features=32, bias=True)
6
+ (2): Mish()
7
+ (3): Linear(in_features=32, out_features=16, bias=True)
8
+ )
9
+ (mlp_mean): ResidualMLP(
10
+ (layers): ModuleList(
11
+ (0): Linear(in_features=159, out_features=512, bias=True)
12
+ (1): TwoLayerPreActivationResNetLinear(
13
+ (l1): Linear(in_features=512, out_features=512, bias=True)
14
+ (l2): Linear(in_features=512, out_features=512, bias=True)
15
+ (act): ReLU()
16
+ )
17
+ (2): Linear(in_features=512, out_features=32, bias=True)
18
+ (3): Identity()
19
+ )
20
+ )
21
+ )
22
+ (actor_ft): NoisyFlowMLP(
23
+ (policy): FlowMLP(
24
+ (time_embedding): Sequential(
25
+ (0): SinusoidalPosEmb()
26
+ (1): Linear(in_features=16, out_features=32, bias=True)
27
+ (2): Mish()
28
+ (3): Linear(in_features=32, out_features=16, bias=True)
29
+ )
30
+ (mlp_mean): ResidualMLP(
31
+ (layers): ModuleList(
32
+ (0): Linear(in_features=159, out_features=512, bias=True)
33
+ (1): TwoLayerPreActivationResNetLinear(
34
+ (l1): Linear(in_features=512, out_features=512, bias=True)
35
+ (l2): Linear(in_features=512, out_features=512, bias=True)
36
+ (act): ReLU()
37
+ )
38
+ (2): Linear(in_features=512, out_features=32, bias=True)
39
+ (3): Identity()
40
+ )
41
+ )
42
+ )
43
+ (explore_noise_net): ExploreNoiseNet(
44
+ (mlp_logvar): MLP(
45
+ (moduleList): ModuleList(
46
+ (0): Sequential(
47
+ (linear_1): Linear(in_features=127, out_features=64, bias=True)
48
+ (act_1): Tanh()
49
+ )
50
+ (1): Sequential(
51
+ (linear_1): Linear(in_features=64, out_features=64, bias=True)
52
+ (act_1): Tanh()
53
+ )
54
+ (2): Sequential(
55
+ (linear_1): Linear(in_features=64, out_features=32, bias=True)
56
+ (act_1): Identity()
57
+ )
58
+ )
59
+ )
60
+ )
61
+ )
62
+ (critic): CriticObs(
63
+ (Q1): ResidualMLP(
64
+ (layers): ModuleList(
65
+ (0): Linear(in_features=111, out_features=256, bias=True)
66
+ (1): TwoLayerPreActivationResNetLinear(
67
+ (l1): Linear(in_features=256, out_features=256, bias=True)
68
+ (l2): Linear(in_features=256, out_features=256, bias=True)
69
+ (act): Mish()
70
+ )
71
+ (2): Linear(in_features=256, out_features=1, bias=True)
72
+ (3): Identity()
73
+ )
74
+ )
75
+ )
76
+ )
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/checkpoint/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2ac28e2c3de5ac5a6c88ae5d8424488a168669b265e27da2fc849bf90783821
+ size 12129240
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/checkpoint/state_999.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a9490fb1e3d3c926896f2f497bba017655fdf8a1249e10cea8685e26c8d3e3c
+ size 12135418
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/explore_noise.png ADDED

Git LFS Details

  • SHA256: ab72cb38e0ba0b343f600f3d3b9bc60d8302165b241f7dad1092b53066ed6bf6
  • Pointer size: 130 Bytes
  • Size of remote file: 20.3 kB
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/result.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bab79232bd796b4ada6e144181b9241593e0193ba814a030ad25f032f445936
+ size 845615
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/run.log ADDED
The diff for this file is too large to render. See raw diff
 
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed3407/2025-04-20_20-30-25_seed3407/test_lr_schedulers.png ADDED

Git LFS Details

  • SHA256: d9e78cf85658d6dbff47d67da24506f522cd01012569d2b5bd82ce5645ac149b
  • Pointer size: 130 Bytes
  • Size of remote file: 40.7 kB
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/config.yaml ADDED
@@ -0,0 +1,129 @@
1
+ env_name: ant-medium-expert-v0
2
+ action_dim: 8
3
+ horizon_steps: 4
4
+ act_steps: 4
5
+ obs_dim: 111
6
+ cond_steps: 1
7
+ _target_: agent.finetune.mine.train_ppo_flow_agent.TrainPPOFlowAgent
8
+ name: ${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}
9
+ logdir: ${oc.env:DPPO_LOG_DIR}/gym/gym-finetune/${env_name}_ppo_reflow_mlp_ta${horizon_steps}_td${denoising_steps}_tdf${ft_denoising_steps}/${now:%Y-%m-%d}_${now:%H-%M-%S}_seed${seed}
10
+ base_policy_path: ${oc.env:DPPO_LOG_DIR}/gym/gym-pretrain/ant-medium-expert-v0_pre_reflow_mlp_ta4_td20/2025-03-29_19-23-57_42/checkpoint/best_769.pt
11
+ normalization_path: ${oc.env:DPPO_DATA_DIR}/gym/${env_name}/normalization.npz
12
+ seed: 42
13
+ device: cuda:0
14
+ denoising_steps: 4
15
+ ft_denoising_steps: 4
16
+ env:
17
+ n_envs: 40
18
+ name: ${env_name}
19
+ max_episode_steps: 1000
20
+ reset_at_iteration: false
21
+ save_video: false
22
+ best_reward_threshold_for_success: 3
23
+ wrappers:
24
+ mujoco_locomotion_lowdim:
25
+ normalization_path: ${normalization_path}
26
+ multi_step:
27
+ n_obs_steps: ${cond_steps}
28
+ n_action_steps: ${act_steps}
29
+ max_episode_steps: ${env.max_episode_steps}
30
+ reset_within_step: true
31
+ wandb:
32
+ entity: ${oc.env:DPPO_WANDB_ENTITY}
33
+ project: gym-${env_name}-finetune
34
+ run: ${now:%Y-%m-%d}_${now:%H-%M-%S}_${name}
35
+ offline_mode: false
36
+ train:
37
+ n_train_itr: 1000
38
+ n_critic_warmup_itr: 0
39
+ n_steps: 500
40
+ gamma: 0.99
41
+ actor_lr: 4.5e-05
42
+ actor_weight_decay: 0
43
+ actor_lr_scheduler:
44
+ type: cosine
45
+ first_cycle_steps: 100
46
+ warmup_steps: 10
47
+ min_lr: 2.0e-05
48
+ critic_lr: 0.00065
49
+ critic_weight_decay: 1.0e-05
50
+ critic_lr_scheduler:
51
+ type: cosine
52
+ first_cycle_steps: 100
53
+ warmup_steps: 10
54
+ min_lr: 0.0003
55
+ save_model_freq: 300
56
+ val_freq: 10
57
+ render:
58
+ freq: 1
59
+ num: 0
60
+ reward_scale_running: true
61
+ reward_scale_const: 1.0
62
+ gae_lambda: 0.95
63
+ batch_size: 50000
64
+ update_epochs: 5
65
+ vf_coef: 0.5
66
+ ent_coef: 0.03
67
+ target_kl: 1.0
68
+ lr_schedule: fixed
69
+ repeat_samples: true
70
+ verbose: false
71
+ clip_intermediate_actions: true
72
+ account_for_initial_stochasticity: true
73
+ model:
74
+ _target_: model.flow.ft_ppo.ppoflow.PPOFlow
75
+ device: ${device}
76
+ policy:
77
+ _target_: model.flow.mlp_flow.FlowMLP
78
+ horizon_steps: ${horizon_steps}
79
+ action_dim: ${action_dim}
80
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
81
+ time_dim: 16
82
+ mlp_dims:
83
+ - 512
84
+ - 512
85
+ - 512
86
+ activation_type: ReLU
87
+ out_activation_type: Identity
88
+ use_layernorm: false
89
+ residual_style: true
90
+ critic:
91
+ _target_: model.common.critic.CriticObs
92
+ cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
93
+ mlp_dims:
94
+ - 256
95
+ - 256
96
+ - 256
97
+ activation_type: Mish
98
+ residual_style: true
99
+ actor_policy_path: ${base_policy_path}
100
+ act_dim: ${action_dim}
101
+ horizon_steps: ${horizon_steps}
102
+ act_min: -1
103
+ act_max: 1
104
+ obs_dim: ${obs_dim}
105
+ cond_steps: ${cond_steps}
106
+ noise_scheduler_type: learn_decay
107
+ inference_steps: ${denoising_steps}
108
+ ft_denoising_steps: ${ft_denoising_steps}
109
+ randn_clip_value: 3
110
+ min_sampling_denoising_std: 0.08
111
+ min_logprob_denoising_std: 0.08
112
+ max_logprob_denoising_std: 0.16
113
+ logprob_min: -1.0
114
+ logprob_max: 1.0
115
+ clip_ploss_coef: 0.01
116
+ clip_ploss_coef_base: 0.01
117
+ clip_ploss_coef_rate: 3
118
+ clip_vloss_coef: null
119
+ denoised_clip_value: 1.0
120
+ time_dim_explore: 0
121
+ learn_explore_time_embedding: false
122
+ use_time_independent_noise: false
123
+ init_time_embedding: true
124
+ noise_hidden_dims:
125
+ - 64
126
+ - 64
127
+ logprob_debug_sample: false
128
+ logprob_debug_recalculate: false
129
+ explore_net_activation_type: Tanh
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/hydra.yaml ADDED
@@ -0,0 +1,165 @@
1
+ hydra:
2
+ run:
3
+ dir: ${logdir}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
6
+ subdir: ${hydra.job.num}
7
+ launcher:
8
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
9
+ sweeper:
10
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
11
+ max_batch_size: null
12
+ params: null
13
+ help:
14
+ app_name: ${hydra.job.name}
15
+ header: '${hydra.help.app_name} is powered by Hydra.
16
+
17
+ '
18
+ footer: 'Powered by Hydra (https://hydra.cc)
19
+
20
+ Use --hydra-help to view Hydra specific help
21
+
22
+ '
23
+ template: '${hydra.help.header}
24
+
25
+ == Configuration groups ==
26
+
27
+ Compose your configuration from those groups (group=option)
28
+
29
+
30
+ $APP_CONFIG_GROUPS
31
+
32
+
33
+ == Config ==
34
+
35
+ Override anything in the config (foo.bar=value)
36
+
37
+
38
+ $CONFIG
39
+
40
+
41
+ ${hydra.help.footer}
42
+
43
+ '
44
+ hydra_help:
45
+ template: 'Hydra (${hydra.runtime.version})
46
+
47
+ See https://hydra.cc for more info.
48
+
49
+
50
+ == Flags ==
51
+
52
+ $FLAGS_HELP
53
+
54
+
55
+ == Configuration groups ==
56
+
57
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
58
+ to command line)
59
+
60
+
61
+ $HYDRA_CONFIG_GROUPS
62
+
63
+
64
+ Use ''--cfg hydra'' to Show the Hydra config.
65
+
66
+ '
67
+ hydra_help: ???
68
+ hydra_logging:
69
+ version: 1
70
+ formatters:
71
+ simple:
72
+ format: '[%(asctime)s][HYDRA] %(message)s'
73
+ handlers:
74
+ console:
75
+ class: logging.StreamHandler
76
+ formatter: simple
77
+ stream: ext://sys.stdout
78
+ root:
79
+ level: INFO
80
+ handlers:
81
+ - console
82
+ loggers:
83
+ logging_example:
84
+ level: DEBUG
85
+ disable_existing_loggers: false
86
+ job_logging:
87
+ version: 1
88
+ formatters:
89
+ simple:
90
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
91
+ handlers:
92
+ console:
93
+ class: logging.StreamHandler
94
+ formatter: simple
95
+ stream: ext://sys.stdout
96
+ file:
97
+ class: logging.FileHandler
98
+ formatter: simple
99
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
100
+ root:
101
+ level: INFO
102
+ handlers:
103
+ - console
104
+ - file
105
+ disable_existing_loggers: false
106
+ env: {}
107
+ mode: RUN
108
+ searchpath: []
109
+ callbacks: {}
110
+ output_subdir: .hydra
111
+ overrides:
112
+ hydra:
113
+ - hydra.mode=RUN
114
+ task:
115
+ - device=cuda:3
116
+ - model.use_time_independent_noise=False
117
+ - model.noise_hidden_dims=[64,64]
118
+ - model.min_sampling_denoising_std=0.08
119
+ - model.min_logprob_denoising_std=0.08
120
+ - model.max_logprob_denoising_std=0.16
121
+ - train.ent_coef=0.03
122
+ - model.noise_scheduler_type=learn_decay
123
+ job:
124
+ name: run
125
+ chdir: null
126
+ override_dirname: device=cuda:3,model.max_logprob_denoising_std=0.16,model.min_logprob_denoising_std=0.08,model.min_sampling_denoising_std=0.08,model.noise_hidden_dims=[64,64],model.noise_scheduler_type=learn_decay,model.use_time_independent_noise=False,train.ent_coef=0.03
127
+ id: ???
128
+ num: ???
129
+ config_name: ft_ppo_reflow_mlp_learn
130
+ env_set: {}
131
+ env_copy: []
132
+ config:
133
+ override_dirname:
134
+ kv_sep: '='
135
+ item_sep: ','
136
+ exclude_keys: []
137
+ runtime:
138
+ version: 1.3.2
139
+ version_base: '1.3'
140
+ cwd: /home/zhangtonghe/dppo
141
+ config_sources:
142
+ - path: hydra.conf
143
+ schema: pkg
144
+ provider: hydra
145
+ - path: /home/zhangtonghe/dppo/cfg
146
+ schema: file
147
+ provider: main
148
+ - path: /home/zhangtonghe/dppo/cfg/gym/finetune/ant-medium-expert-v0
149
+ schema: file
150
+ provider: command-line
151
+ - path: ''
152
+ schema: structured
153
+ provider: schema
154
+ output_dir: /home/zhangtonghe/dppo/log/gym/gym-finetune/ant-medium-expert-v0_ppo_reflow_mlp_ta4_td4_tdf4/2025-04-20_10-14-30_seed42
155
+ choices:
156
+ hydra/env: default
157
+ hydra/callbacks: null
158
+ hydra/job_logging: default
159
+ hydra/hydra_logging: default
160
+ hydra/hydra_help: default
161
+ hydra/help: default
162
+ hydra/sweeper: basic
163
+ hydra/launcher: basic
164
+ hydra/output: default
165
+ verbose: false
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/.hydra/overrides.yaml ADDED
@@ -0,0 +1,8 @@
+ - device=cuda:3
+ - model.use_time_independent_noise=False
+ - model.noise_hidden_dims=[64,64]
+ - model.min_sampling_denoising_std=0.08
+ - model.min_logprob_denoising_std=0.08
+ - model.max_logprob_denoising_std=0.16
+ - train.ent_coef=0.03
+ - model.noise_scheduler_type=learn_decay
log/log_gym_finetuned/gym/ant-v0/ReFlow/seed42/2025-04-20_10-14-30_seed42/architecture.log ADDED
@@ -0,0 +1,76 @@
1
+ self.model=PPOFlow(
2
+ (actor_old): FlowMLP(
3
+ (time_embedding): Sequential(
4
+ (0): SinusoidalPosEmb()
5
+ (1): Linear(in_features=16, out_features=32, bias=True)
6
+ (2): Mish()
7
+ (3): Linear(in_features=32, out_features=16, bias=True)
8
+ )
9
+ (mlp_mean): ResidualMLP(
10
+ (layers): ModuleList(
11
+ (0): Linear(in_features=159, out_features=512, bias=True)
12
+ (1): TwoLayerPreActivationResNetLinear(
13
+ (l1): Linear(in_features=512, out_features=512, bias=True)
14
+ (l2): Linear(in_features=512, out_features=512, bias=True)
15
+ (act): ReLU()
16
+ )
17
+ (2): Linear(in_features=512, out_features=32, bias=True)
18
+ (3): Identity()
19
+ )
20
+ )
21
+ )
22
+ (actor_ft): NoisyFlowMLP(
23
+ (policy): FlowMLP(
24
+ (time_embedding): Sequential(
25
+ (0): SinusoidalPosEmb()
26
+ (1): Linear(in_features=16, out_features=32, bias=True)
27
+ (2): Mish()
28
+ (3): Linear(in_features=32, out_features=16, bias=True)
29
+ )
30
+ (mlp_mean): ResidualMLP(
31
+ (layers): ModuleList(
32
+ (0): Linear(in_features=159, out_features=512, bias=True)
33
+ (1): TwoLayerPreActivationResNetLinear(
34
+ (l1): Linear(in_features=512, out_features=512, bias=True)
35
+ (l2): Linear(in_features=512, out_features=512, bias=True)
36
+ (act): ReLU()
37
+ )
38
+ (2): Linear(in_features=512, out_features=32, bias=True)
39
+ (3): Identity()
40
+ )
41
+ )
42
+ )
43
+ (explore_noise_net): ExploreNoiseNet(
44
+ (mlp_logvar): MLP(
45
+ (moduleList): ModuleList(
46
+ (0): Sequential(
47
+ (linear_1): Linear(in_features=127, out_features=64, bias=True)
48
+ (act_1): Tanh()
49
+ )
50
+ (1): Sequential(
51
+ (linear_1): Linear(in_features=64, out_features=64, bias=True)
52
+ (act_1): Tanh()
53
+ )
54
+ (2): Sequential(
55
+ (linear_1): Linear(in_features=64, out_features=32, bias=True)
56
+ (act_1): Identity()
57
+ )
58
+ )
59
+ )
60
+ )
61
+ )
62
+ (critic): CriticObs(
63
+ (Q1): ResidualMLP(
64
+ (layers): ModuleList(
65
+ (0): Linear(in_features=111, out_features=256, bias=True)
66
+ (1): TwoLayerPreActivationResNetLinear(
67
+ (l1): Linear(in_features=256, out_features=256, bias=True)
68
+ (l2): Linear(in_features=256, out_features=256, bias=True)
69
+ (act): Mish()
70
+ )
71
+ (2): Linear(in_features=256, out_features=1, bias=True)
72
+ (3): Identity()
73
+ )
74
+ )
75
+ )
76
+ )