# autods_config.yaml.example — example AutoDS configuration.
# Copy to autods_config.yaml and fill in the placeholder values.
---
# AutoDS agent configuration: step budgets for each agent role and the
# model alias (defined under `models:`) the agent uses.
agents:
  autods:
    model: autods_model
    max_steps: 200
    # Validate that submission code imports required AutoML libraries
    # (tsururu, replay, plts, lightautoml, or pyboost)
    validate_submission_imports: false
    analyst_steps: 5
    researcher_steps: 5
    planner_steps: 5
    debugger_steps: 5

# LLM provider credentials. `${VAR}` placeholders are expanded from the
# environment by the consuming application.
model_providers:
  google:
    api_key: ${GOOGLE_API_KEY}
    provider: google
  # OpenRouter provider (OpenAI-compatible API with multiple model backends)
  # openrouter:
  #   api_key: ${OPENROUTER_API_KEY}
  #   provider: openai
  #   base_url: "https://openrouter.ai/api/v1"

# Model definitions referenced by `agents.*.model`.
models:
  autods_model:
    model_provider: google
    model: gemini-2.0-flash
    temperature: 0.5
    max_retries: 10
  # Example: OpenRouter model with provider filtering and custom headers
  # openrouter_llama:
  #   model_provider: openrouter
  #   model: meta-llama/llama-3.1-70b-instruct
  #   max_retries: 10
  #   # Optional: Standard model parameters (temperature, top_p, etc.)
  #   # model_kwargs:
  #   #   temperature: 0.7
  #   #   top_p: 0.9
  #   # Optional: Provider-specific params passed in request body (e.g., OpenRouter)
  #   extra_body:
  #     provider:
  #       # Filter by quantization levels: fp8, fp16, bf16, fp32, int8, int4
  #       quantizations: ["fp8", "fp16"]
  #       # Try providers in order (OpenRouter-specific)
  #       order: ["together", "fireworks"]
  #       # Whether to allow fallback to other providers
  #       allow_fallbacks: true
  #       # Only use providers that don't store data
  #       data_collection: "deny"
  #   # Optional: Custom HTTP headers for the API requests
  #   default_headers:
  #     HTTP-Referer: "https://your-site.com"
  #     X-Title: "Your App Name"

# Grad service endpoint; switch the commented pair to run against a remote host.
grad:
  # local
  mode: local
  host: localhost
  port: 8000
  # remote
  # mode: remote
  # host: 10.32.1.25
  # port: 8446

# Environment variables injected at runtime. Boolean-ish values are quoted
# so they reach the consumer as strings, not YAML booleans.
env:
  LANGSMITH_TRACING: "true"
  LANGSMITH_ENDPOINT: "https://api.smith.langchain.com"
  LANGSMITH_API_KEY: your-key
  LANGSMITH_PROJECT: "AutoDS"
  AUTODS_ENABLE_RERANKER: "true"
  AUTODS_RERANKER_MODEL: "Qwen/Qwen3-Reranker-0.6B"
  GRPC_VERBOSITY: "NONE"
  # cognee llm model
  LLM_PROVIDER: ""
  LLM_MODEL: ""
  LLM_API_KEY: ""
  LLM_ENDPOINT: ""
  # optional
  LLM_API_VERSION: ""
  LLM_MAX_TOKENS: ""
  # cognee embedding model
  EMBEDDING_PROVIDER: ""
  EMBEDDING_MODEL: ""
  EMBEDDING_ENDPOINT: ""
  EMBEDDING_DIMENSIONS: ""
  # remote model
  EMBEDDING_API_KEY: ""
  # optional
  EMBEDDING_API_VERSION: ""
  EMBEDDING_MAX_TOKENS: ""
  # local ollama embedding model
  HUGGINGFACE_TOKENIZER: ""
  # cognee database connection — NOTE(review): original comment said
  # "graph database", but the keys below (VECTOR_DB_PROVIDER, DB_*) look
  # like vector-store settings; confirm against the cognee docs.
  VECTOR_DB_PROVIDER: ""
  DB_HOST: ""
  DB_PORT: ""
  DB_NAME: ""
  DB_USERNAME: ""
  DB_PASSWORD: ""