```json
{
  "activation_dropout": 0.1,
  "activation_function": "gelu",
  "architectures": ["TimeSeriesTransformerForPrediction"],
  "attention_dropout": 0.1,
  "cardinality": [0],
  "context_length": 48,
  "d_model": 16,
  "decoder_attention_heads": 2,
  "decoder_ffn_dim": 32,
  "decoder_layerdrop": 0.1,
  "decoder_layers": 2,
  "distribution_output": "student_t",
  "dropout": 0.1,
  "embedding_dimension": [0],
  "encoder_attention_heads": 2,
  "encoder_ffn_dim": 32,
  "encoder_layerdrop": 0.1,
  "encoder_layers": 2,
  "feature_size": 47,
  "init_std": 0.02,
  "input_size": 1,
  "is_encoder_decoder": true,
  "lags_sequence": [
    1, 2, 3, 4, 5, 6, 7,
    23, 24, 25, 47, 48, 49,
    71, 72, 73, 95, 96, 97,
    119, 120, 121, 143, 144, 145,
    167, 168, 169, 335, 336, 337,
    503, 504, 505, 671, 672, 673,
    719, 720, 721
  ],
  "loss": "nll",
  "model_type": "time_series_transformer",
  "num_dynamic_real_features": 0,
  "num_parallel_samples": 100,
  "num_static_categorical_features": 0,
  "num_static_real_features": 0,
  "num_time_features": 5,
  "prediction_length": 24,
  "scaling": "mean",
  "torch_dtype": "float32",
  "transformers_version": "4.30.0.dev0",
  "use_cache": true
}
```
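
The sketch below is not part of the original config dump; it is a minimal, hedged example of how a configuration with these values could be built programmatically with the Hugging Face `transformers` library (using `TimeSeriesTransformerConfig` and `TimeSeriesTransformerForPrediction`, which exist in the version shown above). Fields not passed explicitly fall back to library defaults, so the result may differ slightly from the exact JSON above.

```python
# Minimal sketch: reconstruct a config like the one above and instantiate a
# randomly initialized model from it. Values are copied from the JSON dump.
from transformers import (
    TimeSeriesTransformerConfig,
    TimeSeriesTransformerForPrediction,
)

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    input_size=1,
    lags_sequence=[
        1, 2, 3, 4, 5, 6, 7,
        23, 24, 25, 47, 48, 49,
        71, 72, 73, 95, 96, 97,
        119, 120, 121, 143, 144, 145,
        167, 168, 169, 335, 336, 337,
        503, 504, 505, 671, 672, 673,
        719, 720, 721,
    ],
    num_time_features=5,
    d_model=16,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=2,
    decoder_attention_heads=2,
    encoder_ffn_dim=32,
    decoder_ffn_dim=32,
    distribution_output="student_t",
    loss="nll",
    scaling="mean",
    num_parallel_samples=100,
)

model = TimeSeriesTransformerForPrediction(config)

# feature_size is derived by the config from input_size, the number of lags,
# the time features, and the loc/scale features; with these values it is 47,
# matching the "feature_size" entry in the JSON above.
print(config.feature_size)
```

Note that `feature_size` is not set directly: with `input_size=1`, 40 lags, 5 time features, and the two loc/scale features added by mean scaling, the derived value is 1 × 40 + 5 + 2 = 47, which is why it appears as 47 in the serialized config.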