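# Decoder-only transformer language model in confection/Thinc-style registry
# syntax: each section binds an "@"-prefixed registry entry to its keyword
# arguments. 20 pre-norm blocks, 4096-token context, RoPE base 10000.
# Boolean-valued fields are written as quoted strings ("True"/"False");
# whether the loader coerces them to booleans depends on its parser.
#
# A config like this is typically loaded and resolved roughly as follows
# (a minimal sketch, assuming a confection-style registry; the file name
# and registry names are illustrative, not taken from this repo):
#
#   from confection import Config, registry
#   config = Config().from_disk("model.cfg")
#   model = registry.resolve(config)["model"]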
[model]
@architectures = "TransformerDecoder"
n_blocks = 20
block_size = 4096
prenorm = "True"
rope_base = 10000

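# Causal self-attention. With n_query_groups == n_heads == 12 this is
# standard multi-head attention (grouped-query attention with one KV head
# per query head); head_dim = n_in / n_heads = 768 / 12 = 64. All four
# projection biases are disabled, as in Llama-style decoders.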
[model.attention]
@layers = "CausalSelfAttention"
n_in = 768
n_heads = 12
n_query_groups = 12
q_bias = "False"
k_bias = "False"
v_bias = "False"
o_bias = "False"

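# Token embedding table: 21178-entry vocabulary, 768-dim embeddings
# (21,178 * 768 = 16,264,704 parameters). Whether these weights are tied
# with [model.head] below is not stated here and depends on the
# implementation behind the registry entries.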
[model.embedding]
@layers = "TokenEmbedding"
n_embeddings = 21178
embedding_size = 768

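# SwiGLU feed-forward block with a 4x expansion (n_hidden = 3072 = 4 * 768).
# The parameter count depends on the implementation: with separate gate, up,
# and down projections at this width it is 3 * 768 * 3072 ≈ 7.1M per block.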
[model.feedforward]
@layers = "SwiGLU"
n_in = 768
n_hidden = 3072

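# Output head: bias-free unembedding projection from the 768-dim hidden
# state to 21178 vocabulary logits (768 * 21,178 ≈ 16.3M parameters if
# untied from the embedding).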
[model.head]
@layers = "ParametrizedLinear"
n_in = 768
n_out = 21178
bias = "False"

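# RMSNorm with eps = 1e-6. Together with prenorm = "True" above, this
# places normalization before each attention and feed-forward sublayer
# (pre-LN placement).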
[model.norm]
@layers = "RMSNorm"
n_in = 768
eps = 0.000001
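
# Rough parameter count (a back-of-envelope sketch; assumes three-matrix
# SwiGLU, two RMSNorms per block, and an untied head):
#   per block: attention 4 * 768^2       = 2,359,296
#              SwiGLU    3 * 768 * 3072  = 7,077,888
#              norms     2 * 768         = 1,536
#   20 blocks: 20 * 9,438,720 = 188,774,400
#   embedding + head: 2 * 21,178 * 768 = 32,529,408
#   total: ~221M parameters (~205M if embedding and head are tied)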