zhuangxialie committed (verified)
Commit 4c563c8 · 1 Parent(s): 0574ea0

Model save

README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ library_name: transformers
+ model_name: Qwen-code-7B-SFT-5k
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for Qwen-code-7B-SFT-5k
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="ZhuangXialie/Qwen-code-7B-SFT-5k", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/zhuangxialie-ubiquant-investment/huggingface/runs/tt1qu69x)
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.49.0
+ - Pytorch: 2.4.0
+ - Datasets: 3.5.0
+ - Tokenizers: 0.21.1
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
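The card states only that the model was produced with SFT via TRL. As an aid to reproduction, the sketch below shows what such a run could look like with TRL's `SFTTrainer`. It is a minimal sketch, not the author's script: the base model, dataset file, gradient-accumulation factor, and `bf16` flag are assumptions, while epochs, per-device batch size, logging/save steps, and the peak learning rate are read off `trainer_state.json` in this commit.

```python
# Hedged reconstruction of the SFT run; values marked "assumption" are not
# confirmed anywhere in this commit.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

base_model = "Qwen/Qwen2.5-Coder-7B-Instruct"  # assumption: the card lists the base model as None
train_data = load_dataset("json", data_files="sft_code_5k.jsonl", split="train")  # hypothetical 5k-sample file

model = AutoModelForCausalLM.from_pretrained(base_model)
tokenizer = AutoTokenizer.from_pretrained(base_model)

args = SFTConfig(
    output_dir="Qwen-code-7B-SFT-5k",
    num_train_epochs=4,              # num_train_epochs in trainer_state.json
    per_device_train_batch_size=1,   # train_batch_size in trainer_state.json
    gradient_accumulation_steps=16,  # assumption: yields the ~16-sample effective batch implied by the throughput
    learning_rate=5e-5,              # peak LR consistent with the logged schedule (~4.99e-5 after warmup)
    logging_steps=5,                 # logging_steps in trainer_state.json
    save_steps=2000,                 # save_steps in trainer_state.json
    bf16=True,                       # assumption: typical for a 7B SFT run
)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=train_data,        # assumption: conversational records with a "messages" column
    processing_class=tokenizer,
)
trainer.train()
```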
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 55862298214400.0,
+ "train_loss": 0.25453417761058644,
+ "train_runtime": 699.0514,
+ "train_samples": 4958,
+ "train_samples_per_second": 2.666,
+ "train_steps_per_second": 0.166
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.49.0"
+ }
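For reference, the sampling defaults above (temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.1, sampling enabled) travel with the checkpoint: `transformers` loads `generation_config.json` alongside the weights, so plain `generate()` calls pick them up unless explicitly overridden. A minimal sketch with an illustrative prompt (`device_map="auto"` assumes `accelerate` is installed):

```python
# Inspect and use the stored generation defaults.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ZhuangXialie/Qwen-code-7B-SFT-5k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

print(model.generation_config)  # mirrors the generation_config.json shown above

messages = [{"role": "user", "content": "Write a Python function that reverses a linked list."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Sampling settings come from the stored config; only the length is overridden here.
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```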
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 55862298214400.0,
+ "train_loss": 0.25453417761058644,
+ "train_runtime": 699.0514,
+ "train_samples": 4958,
+ "train_samples_per_second": 2.666,
+ "train_steps_per_second": 0.166
+ }
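The throughput figures are internally consistent: 116 optimizer steps over roughly 699 s gives the reported 0.166 steps/s, and dividing samples/s by steps/s implies about 16 sequences per optimizer step. That effective batch size is an inference (presumably per-device batch 1 times gradient accumulation and/or data parallelism), not a logged value; a quick check:

```python
# Sanity-check the reported throughput; "effective batch" is derived, not logged.
train_runtime = 699.0514      # seconds, from train_results.json
global_steps = 116            # from trainer_state.json
samples_per_second = 2.666
steps_per_second = 0.166

print(round(global_steps / train_runtime, 3))           # ~0.166, matches train_steps_per_second
print(round(samples_per_second / steps_per_second, 1))  # ~16.1 sequences per optimizer step
```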
trainer_state.json ADDED
@@ -0,0 +1,227 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.8813559322033897,
+ "eval_steps": 500,
+ "global_step": 116,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.1694915254237288,
+ "grad_norm": 1.030324124303403,
+ "learning_rate": 2.0833333333333336e-05,
+ "loss": 0.4959,
+ "mean_token_accuracy": 0.8713347434997558,
+ "step": 5
+ },
+ {
+ "epoch": 0.3389830508474576,
+ "grad_norm": 0.5717467306853743,
+ "learning_rate": 4.166666666666667e-05,
+ "loss": 0.4216,
+ "mean_token_accuracy": 0.8822458744049072,
+ "step": 10
+ },
+ {
+ "epoch": 0.5084745762711864,
+ "grad_norm": 0.30670252885331056,
+ "learning_rate": 4.9907672546384545e-05,
+ "loss": 0.3837,
+ "mean_token_accuracy": 0.8890745401382446,
+ "step": 15
+ },
+ {
+ "epoch": 0.6779661016949152,
+ "grad_norm": 0.28508850602109415,
+ "learning_rate": 4.9346190892086174e-05,
+ "loss": 0.3635,
+ "mean_token_accuracy": 0.8940897285938263,
+ "step": 20
+ },
+ {
+ "epoch": 0.847457627118644,
+ "grad_norm": 0.231083779269082,
+ "learning_rate": 4.8287289481503954e-05,
+ "loss": 0.3489,
+ "mean_token_accuracy": 0.8982042372226715,
+ "step": 25
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.2246638901095726,
+ "learning_rate": 4.675507862678257e-05,
+ "loss": 0.346,
+ "mean_token_accuracy": 0.8966788053512573,
+ "step": 30
+ },
+ {
+ "epoch": 1.1694915254237288,
+ "grad_norm": 0.22838220228380773,
+ "learning_rate": 4.478444550590631e-05,
+ "loss": 0.2887,
+ "mean_token_accuracy": 0.9111442804336548,
+ "step": 35
+ },
+ {
+ "epoch": 1.3389830508474576,
+ "grad_norm": 0.23878993019310235,
+ "learning_rate": 4.2420259810417894e-05,
+ "loss": 0.2754,
+ "mean_token_accuracy": 0.9148677706718444,
+ "step": 40
+ },
+ {
+ "epoch": 1.5084745762711864,
+ "grad_norm": 0.16810576615741815,
+ "learning_rate": 3.9716352099533276e-05,
+ "loss": 0.2806,
+ "mean_token_accuracy": 0.9128606796264649,
+ "step": 45
+ },
+ {
+ "epoch": 1.6779661016949152,
+ "grad_norm": 0.2005840241516876,
+ "learning_rate": 3.6734288122687036e-05,
+ "loss": 0.2748,
+ "mean_token_accuracy": 0.9143766462802887,
+ "step": 50
+ },
+ {
+ "epoch": 1.847457627118644,
+ "grad_norm": 0.19954679142899281,
+ "learning_rate": 3.354196701817348e-05,
+ "loss": 0.2685,
+ "mean_token_accuracy": 0.9158895432949066,
+ "step": 55
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.35183907693360755,
+ "learning_rate": 3.021207530574477e-05,
+ "loss": 0.2616,
+ "mean_token_accuracy": 0.9172095722622342,
+ "step": 60
+ },
+ {
+ "epoch": 2.169491525423729,
+ "grad_norm": 0.22105077779359006,
+ "learning_rate": 2.6820431874480006e-05,
+ "loss": 0.2095,
+ "mean_token_accuracy": 0.9329459547996521,
+ "step": 65
+ },
+ {
+ "epoch": 2.3389830508474576,
+ "grad_norm": 0.19846361542341748,
+ "learning_rate": 2.344426164918712e-05,
+ "loss": 0.2033,
+ "mean_token_accuracy": 0.9337680399417877,
+ "step": 70
+ },
+ {
+ "epoch": 2.5084745762711864,
+ "grad_norm": 0.18894701230706007,
+ "learning_rate": 2.0160437242530445e-05,
+ "loss": 0.1939,
+ "mean_token_accuracy": 0.937131541967392,
+ "step": 75
+ },
+ {
+ "epoch": 2.6779661016949152,
+ "grad_norm": 0.20211452980324235,
+ "learning_rate": 1.704372862901521e-05,
+ "loss": 0.1914,
+ "mean_token_accuracy": 0.9378842771053314,
+ "step": 80
+ },
+ {
+ "epoch": 2.847457627118644,
+ "grad_norm": 0.1894485868510866,
+ "learning_rate": 1.4165100694309626e-05,
+ "loss": 0.1949,
+ "mean_token_accuracy": 0.9367973864078522,
+ "step": 85
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.2043299022821172,
+ "learning_rate": 1.1590097423302684e-05,
+ "loss": 0.1836,
+ "mean_token_accuracy": 0.9393455651071336,
+ "step": 90
+ },
+ {
+ "epoch": 3.169491525423729,
+ "grad_norm": 0.1901039140832894,
+ "learning_rate": 9.377349517603714e-06,
+ "loss": 0.1444,
+ "mean_token_accuracy": 0.9531322956085205,
+ "step": 95
+ },
+ {
+ "epoch": 3.3389830508474576,
+ "grad_norm": 0.17816548408646005,
+ "learning_rate": 7.57723942280278e-06,
+ "loss": 0.1399,
+ "mean_token_accuracy": 0.9542467951774597,
+ "step": 100
+ },
+ {
+ "epoch": 3.5084745762711864,
+ "grad_norm": 0.202508229530206,
+ "learning_rate": 6.230754161720593e-06,
+ "loss": 0.1377,
+ "mean_token_accuracy": 0.9551035225391388,
+ "step": 105
+ },
+ {
+ "epoch": 3.6779661016949152,
+ "grad_norm": 0.18079082215089404,
+ "learning_rate": 5.368552093689271e-06,
+ "loss": 0.1365,
+ "mean_token_accuracy": 0.9550769627094269,
+ "step": 110
+ },
+ {
+ "epoch": 3.847457627118644,
+ "grad_norm": 0.18036470362534995,
+ "learning_rate": 5.010264848985499e-06,
+ "loss": 0.1337,
+ "mean_token_accuracy": 0.9559988558292389,
+ "step": 115
+ },
+ {
+ "epoch": 3.8813559322033897,
+ "mean_token_accuracy": 0.9553212523460388,
+ "step": 116,
+ "total_flos": 55862298214400.0,
+ "train_loss": 0.25453417761058644,
+ "train_runtime": 699.0514,
+ "train_samples_per_second": 2.666,
+ "train_steps_per_second": 0.166
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 2000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 55862298214400.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
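Since `log_history` above is plain JSON, the loss and token-accuracy trajectory can be inspected offline without access to the W&B run. A minimal sketch, assuming the file has been downloaded locally under its repo name:

```python
# Print the logged training curve from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # the final summary entry carries train_loss instead
        print(
            f'step {entry["step"]:>3}  epoch {entry["epoch"]:.2f}  '
            f'loss {entry["loss"]:.4f}  acc {entry["mean_token_accuracy"]:.4f}'
        )
```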