################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# BROWSE_CHUNK_MAX_LENGTH - When browsing websites, defines the length of the chunks stored in memory
BROWSE_CHUNK_MAX_LENGTH=8192
# BROWSE_SUMMARY_MAX_TOKEN - Defines the maximum token length of the summary generated by the GPT agent when browsing websites
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Defines the user agent used by the requests library to browse websites (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml
# USE_WEB_BROWSER - Sets the web browser driver to use with Selenium (defaults to chrome).
# Note: set this to either 'chrome', 'firefox', or 'safari', depending on the browser installed on your system
# USE_WEB_BROWSER=chrome
################################################################################
### LLM PROVIDER
################################################################################
### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=0
USE_AZURE=False
### AZURE
# Azure-specific settings have been moved to `azure.yaml.template`.
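# Example: to route requests through Azure OpenAI, set USE_AZURE=True above and put your
# deployment details in azure.yaml.template (see that file for the expected keys).
#   USE_AZURE=True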
################################################################################
### LLM MODELS
################################################################################
# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo
### LLM MODEL SETTINGS
# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
# When using --gpt3only, SMART_TOKEN_LIMIT needs to be set to 4000.
FAST_TOKEN_LIMIT=4000
SMART_TOKEN_LIMIT=8000
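# Example: when running with the --gpt3only flag (gpt-3.5-turbo has a 4k context window),
# a consistent setting is:
#   SMART_TOKEN_LIMIT=4000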
################################################################################
### MEMORY
################################################################################
### MEMORY_BACKEND - Memory backend type
# local - Default
# pinecone - Pinecone (if configured)
# redis - Redis (if configured)
MEMORY_BACKEND=local
### PINECONE
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region
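# Example: to activate Pinecone, fill in the two values above and switch the backend:
#   MEMORY_BACKEND=pinecone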
### REDIS
# REDIS_HOST - Redis host (Default: localhost)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt
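# Example: to activate Redis, keep the connection settings above and switch the backend:
#   MEMORY_BACKEND=redis
# Note: if no Redis server is running locally, one option (assuming Docker is installed) is:
#   docker run -d --name autogpt-redis -p 6379:6379 redis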
### WEAVIATE
# MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
# WEAVIATE_HOST - Weaviate host IP
# WEAVIATE_PORT - Weaviate host port
# WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
# USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
# WEAVIATE_EMBEDDED_PATH - File system path where to persist data when running Embedded Weaviate
# WEAVIATE_USERNAME - Weaviate username
# WEAVIATE_PASSWORD - Weaviate password
# WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# MEMORY_INDEX - Name of index to create in Weaviate
WEAVIATE_HOST="127.0.0.1"
WEAVIATE_PORT=8080
WEAVIATE_PROTOCOL="http"
USE_WEAVIATE_EMBEDDED=False
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
WEAVIATE_USERNAME=
WEAVIATE_PASSWORD=
WEAVIATE_API_KEY=
MEMORY_INDEX=AutoGpt
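# Example: to activate Weaviate, switch the backend; for the embedded mode, also enable it:
#   MEMORY_BACKEND=weaviate
#   USE_WEAVIATE_EMBEDDED=True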
### MILVUS
# MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
# MILVUS_COLLECTION - Milvus collection name; change it if you want to start
#   a new memory while retaining the old one.
MILVUS_ADDR=your-milvus-cluster-host-port
MILVUS_COLLECTION=autogpt
MILVUS_USER=db_admin
MILVUS_PASSWORD=your-db-password
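# Note: the backend name for Milvus is not listed above; in most Auto-GPT versions it is
# enabled with MEMORY_BACKEND=milvus; check the backends supported by your version.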
################################################################################
### IMAGE GENERATION PROVIDER
################################################################################
### OPEN AI
# IMAGE_PROVIDER - Image provider (Example: dalle)
IMAGE_PROVIDER=dalle
### HUGGINGFACE
# STABLE DIFFUSION
# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4,
#  set in image_gen.py)
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
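# Example (the provider name may vary between versions; 'sd' is used in some releases):
#   IMAGE_PROVIDER=sd
#   HUGGINGFACE_API_TOKEN=your-huggingface-api-token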
################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################
### HUGGINGFACE
HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
################################################################################
### GIT Provider for repository actions
################################################################################
### GITHUB
# GITHUB_API_KEY - GitHub API key / PAT (Example: github_pat_123)
# GITHUB_USERNAME - GitHub username
GITHUB_API_KEY=github_pat_123
GITHUB_USERNAME=your-github-username
################################################################################
### SEARCH PROVIDER
################################################################################
### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
GOOGLE_API_KEY=your-google-api-key
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
################################################################################
### TTS PROVIDER
################################################################################
### MAC OS
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False
### STREAMELEMENTS
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
USE_BRIAN_TTS=False
### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
ELEVENLABS_API_KEY=your-elevenlabs-api-key
ELEVENLABS_VOICE_1_ID=your-voice-id-1
ELEVENLABS_VOICE_2_ID=your-voice-id-2
################################################################################
### TWITTER API
################################################################################
TW_CONSUMER_KEY=
TW_CONSUMER_SECRET=
TW_ACCESS_TOKEN=
TW_ACCESS_TOKEN_SECRET=