{ "model_type": "chess_llm", "architectures": [ "ChessLLMForCausalLM" ], "vocab_size": 153, "hidden_size": 512, "num_attention_heads": 8, "num_hidden_layers": 6, "intermediate_size": 2048, "max_position_embeddings": 512, "pad_token_id": 0, "eos_token_id": 3, "bos_token_id": 2, "torch_dtype": "float32", "transformers_version": "4.30.0" }