# Documentation for how to override sg configuration for local development:
# https://github.com/sourcegraph/sourcegraph/blob/main/doc/dev/background-information/sg/index.md#configuration
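#
# As an illustrative sketch (values here just reuse keys defined below, not
# recommendations): an `sg.config.overwrite.yaml` mirrors the structure of this
# file, so it can override both the global `env` block and a single command's
# `env`, e.g.:
#
#   env:
#     SRC_LOG_LEVEL: debug
#   commands:
#     frontend:
#       env:
#         SITE_CONFIG_FILE: ./dev/site-config.json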
env:
GITSERVER_MEMORY_OBSERVATION_ENABLED: 'true'
PGPORT: 5432
PGHOST: localhost
PGUSER: sourcegraph
PGPASSWORD: sourcegraph
PGDATABASE: sourcegraph
PGSSLMODE: disable
SG_DEV_MIGRATE_ON_APPLICATION_STARTUP: 'true'
INSECURE_DEV: true
SRC_REPOS_DIR: $HOME/.sourcegraph/repos
SRC_LOG_LEVEL: info
SRC_LOG_FORMAT: condensed
SRC_TRACE_LOG: false
# Set this to true to show an iTerm link to the file:line where the log message came from
SRC_LOG_SOURCE_LINK: false
# Use two gitserver instances in local dev
SRC_GIT_SERVER_1: 127.0.0.1:3501
SRC_GIT_SERVER_2: 127.0.0.1:3502
SRC_GIT_SERVERS: 127.0.0.1:3501 127.0.0.1:3502
# Enable sharded indexed search mode:
INDEXED_SEARCH_SERVERS: localhost:3070 localhost:3071
GO111MODULE: 'on'
DEPLOY_TYPE: dev
SRC_HTTP_ADDR: ':3082'
# I don't think we even need to set these?
SEARCHER_URL: http://127.0.0.1:3181
REPO_UPDATER_URL: http://127.0.0.1:3182
REDIS_ENDPOINT: 127.0.0.1:6379
SYMBOLS_URL: http://localhost:3184
SRC_SYNTECT_SERVER: http://localhost:9238
SRC_FRONTEND_INTERNAL: localhost:3090
GRAFANA_SERVER_URL: http://localhost:3370
PROMETHEUS_URL: http://localhost:9090
JAEGER_SERVER_URL: http://localhost:16686
SRC_DEVELOPMENT: 'true'
SRC_PROF_HTTP: ''
SRC_PROF_SERVICES: |
[
{ "Name": "frontend", "Host": "127.0.0.1:6063" },
{ "Name": "gitserver-0", "Host": "127.0.0.1:3551" },
{ "Name": "gitserver-1", "Host": "127.0.0.1:3552" },
{ "Name": "searcher", "Host": "127.0.0.1:6069" },
{ "Name": "symbols", "Host": "127.0.0.1:6071" },
{ "Name": "repo-updater", "Host": "127.0.0.1:6074" },
{ "Name": "codeintel-worker", "Host": "127.0.0.1:6088" },
{ "Name": "worker", "Host": "127.0.0.1:6089" },
{ "Name": "worker-executors", "Host": "127.0.0.1:6996" },
{ "Name": "embeddings", "Host": "127.0.0.1:6099" },
{ "Name": "zoekt-index-0", "Host": "127.0.0.1:6072" },
{ "Name": "zoekt-index-1", "Host": "127.0.0.1:6073" },
{ "Name": "syntactic-code-intel-worker-0", "Host": "127.0.0.1:6075" },
{ "Name": "syntactic-code-intel-worker-1", "Host": "127.0.0.1:6076" },
{ "Name": "zoekt-web-0", "Host": "127.0.0.1:3070", "DefaultPath": "/debug/requests/" },
{ "Name": "zoekt-web-1", "Host": "127.0.0.1:3071", "DefaultPath": "/debug/requests/" }
]
# Settings/config
SITE_CONFIG_FILE: ./dev/site-config.json
SITE_CONFIG_ALLOW_EDITS: true
GLOBAL_SETTINGS_FILE: ./dev/global-settings.json
GLOBAL_SETTINGS_ALLOW_EDITS: true
# Point codeintel to the `frontend` database in development
CODEINTEL_PGPORT: $PGPORT
CODEINTEL_PGHOST: $PGHOST
CODEINTEL_PGUSER: $PGUSER
CODEINTEL_PGPASSWORD: $PGPASSWORD
CODEINTEL_PGDATABASE: $PGDATABASE
CODEINTEL_PGSSLMODE: $PGSSLMODE
CODEINTEL_PGDATASOURCE: $PGDATASOURCE
CODEINTEL_PG_ALLOW_SINGLE_DB: true
# Required for `frontend` and `web` commands
SOURCEGRAPH_HTTPS_DOMAIN: sourcegraph.test
SOURCEGRAPH_HTTPS_PORT: 3443
# Required for `web` commands
NODE_OPTIONS: '--max_old_space_size=8192'
# Default `NODE_ENV` to `development`
NODE_ENV: development
# Required for codeintel object storage
PRECISE_CODE_INTEL_UPLOAD_AWS_ENDPOINT: http://localhost:9000
PRECISE_CODE_INTEL_UPLOAD_BACKEND: blobstore
# Required for embeddings job upload
EMBEDDINGS_UPLOAD_AWS_ENDPOINT: http://localhost:9000
# Required for upload of search job results
SEARCH_JOBS_UPLOAD_AWS_ENDPOINT: http://localhost:9000
# Point code insights to the `frontend` database in development
CODEINSIGHTS_PGPORT: $PGPORT
CODEINSIGHTS_PGHOST: $PGHOST
CODEINSIGHTS_PGUSER: $PGUSER
CODEINSIGHTS_PGPASSWORD: $PGPASSWORD
CODEINSIGHTS_PGDATABASE: $PGDATABASE
CODEINSIGHTS_PGSSLMODE: $PGSSLMODE
CODEINSIGHTS_PGDATASOURCE: $PGDATASOURCE
# Disable code insights by default
DB_STARTUP_TIMEOUT: 120s # codeinsights-db needs more time to start in some instances.
DISABLE_CODE_INSIGHTS_HISTORICAL: true
DISABLE_CODE_INSIGHTS: true
# # OpenTelemetry in dev - use single http/json endpoint
# OTEL_EXPORTER_OTLP_ENDPOINT: http://127.0.0.1:4318
# OTEL_EXPORTER_OTLP_PROTOCOL: http/json
# Enable gRPC Web UI for debugging
GRPC_WEB_UI_ENABLED: 'true'
  # Enable full protobuf message logging when an internal error occurs
SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_ENABLED: 'true'
SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_JSON_TRUNCATION_SIZE_BYTES: '1KB'
SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_HANDLING_MAX_MESSAGE_SIZE_BYTES: '100MB'
## zoekt-specific message logging
GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_ENABLED: 'true'
GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_JSON_TRUNCATION_SIZE_BYTES: '1KB'
GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_HANDLING_MAX_MESSAGE_SIZE_BYTES: '100MB'
# Telemetry V2 export configuration. By default, this points to a test
# instance (go/msp-ops/telemetry-gateway#dev). Set the following:
#
# TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR: 'http://127.0.0.1:6080'
#
# in 'sg.config.overwrite.yaml' to point to a locally running Telemetry
# Gateway instead (via 'sg run telemetry-gateway')
TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR: "https://telemetry-gateway.sgdev.org:443"
SRC_TELEMETRY_EVENTS_EXPORT_ALL: 'true'
# By default, allow temporary edits to external services.
EXTSVC_CONFIG_ALLOW_EDITS: true
commands:
server:
description: Run an all-in-one sourcegraph/server image
cmd: ./dev/run-server-image.sh
env:
TAG: insiders
CLEAN: 'true'
DATA: '/tmp/sourcegraph-data'
URL: 'http://localhost:7080'
frontend:
description: Frontend
cmd: |
# TODO: This should be fixed
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
# If EXTSVC_CONFIG_FILE is *unset*, set a default.
export EXTSVC_CONFIG_FILE=${EXTSVC_CONFIG_FILE-'../dev-private/enterprise/dev/external-services-config.json'}
.bin/frontend
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
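      # Note: this DELVE check repeats in each install script below. Exporting any
      # non-empty DELVE value in the environment sg runs with switches the build to
      # -gcflags='all=-N -l' (no optimizations, no inlining) so the resulting binary
      # can be stepped through with the delve debugger.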
go build -gcflags="$GCFLAGS" -o .bin/frontend github.com/sourcegraph/sourcegraph/cmd/frontend
checkBinary: .bin/frontend
env:
CONFIGURATION_MODE: server
USE_ENHANCED_LANGUAGE_DETECTION: false
SITE_CONFIG_FILE: '../dev-private/enterprise/dev/site-config.json'
SITE_CONFIG_ESCAPE_HATCH_PATH: '$HOME/.sourcegraph/site-config.json'
      # frontend processes need this to be set so that the paths to the assets are rendered correctly
WEB_BUILDER_DEV_SERVER: 1
watch:
- lib
- internal
- cmd/frontend
gitserver-template: &gitserver_template
cmd: .bin/gitserver
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/gitserver github.com/sourcegraph/sourcegraph/cmd/gitserver
checkBinary: .bin/gitserver
env:
HOSTNAME: 127.0.0.1:3178
watch:
- lib
- internal
- cmd/gitserver
# This is only here to stay backwards-compatible with people's custom
# `sg.config.overwrite.yaml` files
gitserver:
<<: *gitserver_template
gitserver-0:
<<: *gitserver_template
env:
GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3501
GITSERVER_ADDR: 127.0.0.1:3501
SRC_REPOS_DIR: $HOME/.sourcegraph/repos_1
SRC_PROF_HTTP: 127.0.0.1:3551
gitserver-1:
<<: *gitserver_template
env:
GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3502
GITSERVER_ADDR: 127.0.0.1:3502
SRC_REPOS_DIR: $HOME/.sourcegraph/repos_2
SRC_PROF_HTTP: 127.0.0.1:3552
repo-updater:
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/repo-updater
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/repo-updater github.com/sourcegraph/sourcegraph/cmd/repo-updater
checkBinary: .bin/repo-updater
watch:
- lib
- internal
- cmd/repo-updater
symbols:
cmd: .bin/symbols
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
# Ensure scip-ctags-dev is installed to avoid prompting the user to
# install it manually.
if [ ! -f $(./dev/scip-ctags-install.sh which) ]; then
./dev/scip-ctags-install.sh
fi
go build -gcflags="$GCFLAGS" -o .bin/symbols github.com/sourcegraph/sourcegraph/cmd/symbols
checkBinary: .bin/symbols
env:
CTAGS_COMMAND: dev/universal-ctags-dev
SCIP_CTAGS_COMMAND: dev/scip-ctags-dev
CTAGS_PROCESSES: 2
USE_ROCKSKIP: 'false'
watch:
- lib
- internal
- cmd/symbols
embeddings:
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/embeddings
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/embeddings github.com/sourcegraph/sourcegraph/cmd/embeddings
checkBinary: .bin/embeddings
watch:
- lib
- internal
- cmd/embeddings
- internal/embeddings
worker:
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/worker
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/worker github.com/sourcegraph/sourcegraph/cmd/worker
checkBinary: .bin/worker
watch:
- lib
- internal
- cmd/worker
cody-gateway: &cody-gateway-defaults
cmd: &cody-gateway-cmd |
.bin/cody-gateway
install: &cody-gateway-install |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/cody-gateway github.com/sourcegraph/sourcegraph/cmd/cody-gateway
checkBinary: &cody-gateway-checkBinary .bin/cody-gateway
env: &cody-gateway-env
SRC_LOG_LEVEL: info
# Enables metrics in dev via debugserver
SRC_PROF_HTTP: '127.0.0.1:6098'
# Set in 'sg.config.overwrite.yaml' if you want to test local Cody Gateway:
# https://docs-legacy.sourcegraph.com/dev/how-to/cody_gateway
CODY_GATEWAY_DOTCOM_ACCESS_TOKEN: ''
CODY_GATEWAY_DOTCOM_API_URL: https://sourcegraph.test:3443/.api/graphql
CODY_GATEWAY_ALLOW_ANONYMOUS: true
CODY_GATEWAY_DIAGNOSTICS_SECRET: sekret
# Set in 'sg.config.overwrite.yaml' if you want to test upstream
# integrations from local Cody Gateway:
# Entitle: https://app.entitle.io/request?data=eyJkdXJhdGlvbiI6IjIxNjAwIiwianVzdGlmaWNhdGlvbiI6IldSSVRFIEpVU1RJRklDQVRJT04gSEVSRSIsInJvbGVJZHMiOlt7ImlkIjoiYjhmYTk2NzgtNDExZC00ZmU1LWE2NDYtMzY4Y2YzYzUwYjJlIiwidGhyb3VnaCI6ImI4ZmE5Njc4LTQxMWQtNGZlNS1hNjQ2LTM2OGNmM2M1MGIyZSIsInR5cGUiOiJyb2xlIn1dfQ%3D%3D
# GSM: https://console.cloud.google.com/security/secret-manager?project=cody-gateway-dev
CODY_GATEWAY_ANTHROPIC_ACCESS_TOKEN: sekret
CODY_GATEWAY_OPENAI_ACCESS_TOKEN: sekret
CODY_GATEWAY_FIREWORKS_ACCESS_TOKEN: sekret
CODY_GATEWAY_SOURCEGRAPH_EMBEDDINGS_API_TOKEN: sekret
CODY_GATEWAY_GOOGLE_ACCESS_TOKEN: sekret
# Connect to services that require SAMS M2M http://go/sams-m2m
SAMS_URL: https://accounts.sgdev.org
# Connect to Enterprise Portal running locally
CODY_GATEWAY_ENTERPRISE_PORTAL_URL: http://localhost:6081
externalSecrets: &cody-gateway-externalSecrets
SAMS_CLIENT_ID:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_ID
SAMS_CLIENT_SECRET:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_SECRET
watch: &cody-gateway-watch
- lib
- internal
- cmd/cody-gateway
cody-gateway-local-e2e:
cmd: *cody-gateway-cmd
install: *cody-gateway-install
checkBinary: *cody-gateway-checkBinary
watch: *cody-gateway-watch
env: # we can't use anchors here, because we're *removing* fields, which wouldn't work with a YAML override.
SRC_LOG_LEVEL: info
# Enables metrics in dev via debugserver
SRC_PROF_HTTP: '127.0.0.1:6098'
CODY_GATEWAY_ALLOW_ANONYMOUS: true
CODY_GATEWAY_DIAGNOSTICS_SECRET: sekret
# Default site-admin user's token, created by `sg db default-site-admin`
CODY_GATEWAY_DOTCOM_ACCESS_TOKEN: 'sgp_local_f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0'
CODY_GATEWAY_SOURCEGRAPH_EMBEDDINGS_API_TOKEN: sgp_local_f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0
CODY_GATEWAY_DOTCOM_API_URL: https://sourcegraph.test:3443/.api/graphql
# Connect to services that require SAMS M2M http://go/sams-m2m
SAMS_URL: https://accounts.sgdev.org
# Connect to Enterprise Portal running locally
CODY_GATEWAY_ENTERPRISE_PORTAL_URL: http://localhost:6081
externalSecrets: # same here
# SAMS
SAMS_CLIENT_ID:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_ID
SAMS_CLIENT_SECRET:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_SECRET
# Uses a separate Anthropic account, see 1Password
# https://start.1password.com/open/i?a=HEDEDSLHPBFGRBTKAKJWE23XX4&v=dnrhbauihkhjs5ag6vszsme45a&i=mr6paob2rlmxp7fgrx6gdr3l7e&h=team-sourcegraph.1password.com
CODY_GATEWAY_ANTHROPIC_ACCESS_TOKEN:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_CODY_GATEWAY_ANTHROPIC_ACCESS_TOKEN
# LLM Tokens
#
# 🚧 TODO(@jhchabran), Waiting on: https://sourcegraph.slack.com/archives/C01CSS3TC75/p1719502918899479
#
# CODY_GATEWAY_OPENAI_ACCESS_TOKEN:
# project: sourcegraph-local-dev
# name: SG_LOCAL_DEV_CODY_GATEWAY_OPENAI_ACCESS_TOKEN
# CODY_GATEWAY_FIREWORKS_ACCESS_TOKEN:
# project: sourcegraph-local-dev
# name: SG_LOCAL_DEV_CODY_GATEWAY_FIREWORKS_ACCESS_TOKEN
# CODY_GATEWAY_GOOGLE_ACCESS_TOKEN:
# project: sourcegraph-local-dev
# name: SG_LOCAL_DEV_CODY_GATEWAY_GOOGLE_ACCESS_TOKEN
telemetry-gateway:
cmd: |
# Telemetry Gateway needs this to parse and validate incoming license keys.
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/telemetry-gateway
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/telemetry-gateway github.com/sourcegraph/sourcegraph/cmd/telemetry-gateway
checkBinary: .bin/telemetry-gateway
env:
PORT: '6080'
DIAGNOSTICS_SECRET: sekret
TELEMETRY_GATEWAY_EVENTS_PUBSUB_ENABLED: false
SRC_LOG_LEVEL: info
GRPC_WEB_UI_ENABLED: true
# Set for convenience - use real values in sg.config.overwrite.yaml if you
# are interacting with RPCs that enforce SAMS M2M auth. See
# https://github.com/sourcegraph/accounts.sourcegraph.com/wiki/Operators-Cheat-Sheet#create-a-new-idp-client
TELEMETRY_GATEWAY_SAMS_CLIENT_ID: 'foo'
TELEMETRY_GATEWAY_SAMS_CLIENT_SECRET: 'bar'
watch:
- lib
- internal
- cmd/telemetry-gateway
- internal/telemetrygateway
pings:
cmd: |
.bin/pings
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/pings github.com/sourcegraph/sourcegraph/cmd/pings
checkBinary: .bin/pings
env:
PORT: '6080'
SRC_LOG_LEVEL: info
DIAGNOSTICS_SECRET: 'lifeisgood'
PINGS_PUBSUB_PROJECT_ID: 'telligentsourcegraph'
PINGS_PUBSUB_TOPIC_ID: 'server-update-checks-test'
HUBSPOT_ACCESS_TOKEN: ''
# Enables metrics in dev via debugserver
SRC_PROF_HTTP: '127.0.0.1:7011'
watch:
- lib
- internal
- cmd/pings
msp-example:
cmd: .bin/msp-example
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/msp-example github.com/sourcegraph/sourcegraph/cmd/msp-example
checkBinary: .bin/msp-example
env:
PORT: '9080'
DIAGNOSTICS_SECRET: sekret
SRC_LOG_LEVEL: debug
STATELESS_MODE: 'true'
watch:
- cmd/msp-example
- lib/managedservicesplatform
enterprise-portal:
cmd: |
export PGDSN="postgres://$PGUSER:$PGPASSWORD@$PGHOST:$PGPORT/{{ .Database }}?sslmode=$PGSSLMODE"
# Connect to local development database, with the assumption that it will
# have dotcom database tables.
export DOTCOM_PGDSN_OVERRIDE="postgres://$PGUSER:$PGPASSWORD@$PGHOST:$PGPORT/$PGDATABASE?sslmode=$PGSSLMODE"
# Enterprise Portal is responsible for generating license keys.
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/enterprise-portal
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/enterprise-portal github.com/sourcegraph/sourcegraph/cmd/enterprise-portal
# Ensure the "msp_iam" database exists (PostgreSQL has no "IF NOT EXISTS" option).
createdb -h $PGHOST -p $PGPORT -U $PGUSER msp_iam || true
# HACK: Forcibly handle a migration that GORM doesn't seem to handle right
psql -h $PGHOST -p $PGPORT -U $PGUSER -d $PGDATABASE \
-c 'ALTER TABLE enterprise_portal_subscriptions ALTER COLUMN id TYPE uuid USING id::uuid;' || true
checkBinary: .bin/enterprise-portal
env:
PORT: '6081'
DIAGNOSTICS_SECRET: sekret
SRC_LOG_LEVEL: debug
GRPC_WEB_UI_ENABLED: 'true'
ENVIRONMENT_ID: local
# Connects to local database, so include all licenses from local DB
DOTCOM_INCLUDE_PRODUCTION_LICENSES: 'true'
# Used for authentication
SAMS_URL: https://accounts.sgdev.org
REDIS_HOST: localhost
REDIS_PORT: 6379
externalSecrets:
ENTERPRISE_PORTAL_SAMS_CLIENT_ID:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_ID
ENTERPRISE_PORTAL_SAMS_CLIENT_SECRET:
project: sourcegraph-local-dev
name: SG_LOCAL_DEV_SAMS_CLIENT_SECRET
watch:
- lib
- cmd/enterprise-portal
searcher:
cmd: .bin/searcher
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/searcher github.com/sourcegraph/sourcegraph/cmd/searcher
checkBinary: .bin/searcher
watch:
- lib
- internal
- cmd/searcher
caddy:
ignoreStdout: true
ignoreStderr: true
cmd: .bin/caddy_${CADDY_VERSION} run --watch --config=${CADDY_FILE:-dev/Caddyfile}
install_func: installCaddy
env:
CADDY_VERSION: 2.7.3
web:
description: Enterprise version of the web app
cmd: pnpm --filter @sourcegraph/web dev
install: |
pnpm install
pnpm run generate
env:
ENABLE_OPEN_TELEMETRY: true
# Needed so that node can ping the caddy server
NODE_TLS_REJECT_UNAUTHORIZED: 0
web-sveltekit-prod:
description: Builds the production version of the SvelteKit app
cmd: pnpm --filter @sourcegraph/web-sveltekit build
install: |
pnpm install
pnpm run generate
web-sveltekit-server:
description: Starts the vite dev server for the SvelteKit app
cmd: pnpm --filter @sourcegraph/web-sveltekit dev
install: |
pnpm install
pnpm run generate
env:
# The SvelteKit app uses this environment variable to determine where
# to store the generated assets. We don't need to store them in a different
# place in this mode.
DEPLOY_TYPE: ""
web-standalone-http:
description: Standalone web frontend (dev) with API proxy to a configurable URL
cmd: pnpm --filter @sourcegraph/web serve:dev --color
install: |
pnpm install
pnpm run generate
env:
WEB_BUILDER_SERVE_INDEX: true
SOURCEGRAPH_API_URL: https://sourcegraph.sourcegraph.com
web-integration-build:
description: Build development web application for integration tests
cmd: pnpm --filter @sourcegraph/web run build
env:
INTEGRATION_TESTS: true
web-integration-build-prod:
description: Build production web application for integration tests
cmd: pnpm --filter @sourcegraph/web run build
env:
INTEGRATION_TESTS: true
NODE_ENV: production
docsite:
description: Docsite instance serving the docs
env:
RUN_SCRIPT_NAME: .bin/bazel_run_docsite.sh
cmd: |
      # If bazel runs docsite directly, a SIGINT kills bazel but docsite doesn't get
      # terminated properly. So we use --script_path, which tells bazel to write out a
      # script that runs docsite, and let sg run that script instead; that way any
      # signal gets propagated and docsite gets properly terminated.
      #
      # We also specifically put the script in .bin, since that directory is gitignored;
      # otherwise the run script is left around and currently there is no clean way to
      # remove it - even using a bash trap doesn't work, since the trap never gets
      # executed due to sg running the script.
bazel run --script_path=${RUN_SCRIPT_NAME} --noshow_progress --noshow_loading_progress //doc:serve
./${RUN_SCRIPT_NAME}
syntax-highlighter:
ignoreStdout: true
ignoreStderr: true
cmd: |
docker run --name=syntax-highlighter --rm -p9238:9238 \
-e WORKERS=1 -e ROCKET_ADDRESS=0.0.0.0 \
sourcegraph/syntax-highlighter:insiders
install: |
# Remove containers by the old name, too.
docker inspect syntect_server >/dev/null 2>&1 && docker rm -f syntect_server || true
docker inspect syntax-highlighter >/dev/null 2>&1 && docker rm -f syntax-highlighter || true
      # Pull the latest syntax-highlighter insiders image. This only happens during
      # install, and is skipped if OFFLINE=true is set.
if [[ "$OFFLINE" != "true" ]]; then
docker pull -q sourcegraph/syntax-highlighter:insiders
fi
zoekt-indexserver-template: &zoekt_indexserver_template
cmd: |
env PATH="${PWD}/.bin:$PATH" .bin/zoekt-sourcegraph-indexserver \
-sourcegraph_url 'http://localhost:3090' \
-index "$HOME/.sourcegraph/zoekt/index-$ZOEKT_NUM" \
-hostname "localhost:$ZOEKT_HOSTNAME_PORT" \
-interval 1m \
-listen "127.0.0.1:$ZOEKT_LISTEN_PORT" \
-cpu_fraction 0.25
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
mkdir -p .bin
export GOBIN="${PWD}/.bin"
go install -gcflags="$GCFLAGS" github.com/sourcegraph/zoekt/cmd/zoekt-archive-index
go install -gcflags="$GCFLAGS" github.com/sourcegraph/zoekt/cmd/zoekt-git-index
go install -gcflags="$GCFLAGS" github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver
checkBinary: .bin/zoekt-sourcegraph-indexserver
env: &zoektenv
CTAGS_COMMAND: dev/universal-ctags-dev
SCIP_CTAGS_COMMAND: dev/scip-ctags-dev
GRPC_ENABLED: true
zoekt-index-0:
<<: *zoekt_indexserver_template
env:
<<: *zoektenv
ZOEKT_NUM: 0
ZOEKT_HOSTNAME_PORT: 3070
ZOEKT_LISTEN_PORT: 6072
zoekt-index-1:
<<: *zoekt_indexserver_template
env:
<<: *zoektenv
ZOEKT_NUM: 1
ZOEKT_HOSTNAME_PORT: 3071
ZOEKT_LISTEN_PORT: 6073
zoekt-web-template: &zoekt_webserver_template
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
mkdir -p .bin
env GOBIN="${PWD}/.bin" go install -gcflags="$GCFLAGS" github.com/sourcegraph/zoekt/cmd/zoekt-webserver
checkBinary: .bin/zoekt-webserver
env:
JAEGER_DISABLED: true
OPENTELEMETRY_DISABLED: false
GOGC: 25
zoekt-web-0:
<<: *zoekt_webserver_template
cmd: env PATH="${PWD}/.bin:$PATH" .bin/zoekt-webserver -index "$HOME/.sourcegraph/zoekt/index-0" -pprof -rpc -indexserver_proxy -listen "127.0.0.1:3070"
zoekt-web-1:
<<: *zoekt_webserver_template
cmd: env PATH="${PWD}/.bin:$PATH" .bin/zoekt-webserver -index "$HOME/.sourcegraph/zoekt/index-1" -pprof -rpc -indexserver_proxy -listen "127.0.0.1:3071"
codeintel-worker:
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/codeintel-worker
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/codeintel-worker github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-worker
checkBinary: .bin/codeintel-worker
watch:
- lib
- internal
- cmd/precise-code-intel-worker
- lib/codeintel
syntactic-codeintel-worker-template: &syntactic_codeintel_worker_template
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/syntactic-code-intel-worker
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
if [ ! -f $(./dev/scip-syntax-install.sh which) ]; then
echo "Building scip-syntax"
./dev/scip-syntax-install.sh
fi
      echo "Building syntactic-code-intel-worker"
go build -gcflags="$GCFLAGS" -o .bin/syntactic-code-intel-worker github.com/sourcegraph/sourcegraph/cmd/syntactic-code-intel-worker
checkBinary: .bin/syntactic-code-intel-worker
watch:
- lib
- internal
- cmd/syntactic-code-intel-worker
- lib/codeintel
env:
SCIP_SYNTAX_PATH: dev/scip-syntax-dev
syntactic-code-intel-worker-0:
<<: *syntactic_codeintel_worker_template
env:
SYNTACTIC_CODE_INTEL_WORKER_ADDR: 127.0.0.1:6075
syntactic-code-intel-worker-1:
<<: *syntactic_codeintel_worker_template
cmd: |
export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
.bin/syntactic-code-intel-worker
env:
SYNTACTIC_CODE_INTEL_WORKER_ADDR: 127.0.0.1:6076
executor-template:
&executor_template # TMPDIR is set here so it's not set in the `install` process, which would trip up `go build`.
cmd: |
env TMPDIR="$HOME/.sourcegraph/executor-temp" .bin/executor
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/executor github.com/sourcegraph/sourcegraph/cmd/executor
checkBinary: .bin/executor
env:
# Required for frontend and executor to communicate
EXECUTOR_FRONTEND_URL: http://localhost:3080
# Must match the secret defined in the site config.
EXECUTOR_FRONTEND_PASSWORD: hunter2hunter2hunter2
# Disable firecracker inside executor in dev
EXECUTOR_USE_FIRECRACKER: false
EXECUTOR_QUEUE_NAME: TEMPLATE
watch:
- lib
- internal
- cmd/executor
executor-kubernetes-template: &executor_kubernetes_template
cmd: |
cd $MANIFEST_PATH
cleanup() {
kubectl delete jobs --all
kubectl delete -f .
}
kubectl delete -f . --ignore-not-found
kubectl apply -f .
trap cleanup EXIT SIGINT
while true; do
sleep 1
done
install: |
bazel run //cmd/executor-kubernetes:image_tarball
env:
IMAGE: executor-kubernetes:candidate
# TODO: This is required but should only be set on M1 Macs.
PLATFORM: linux/arm64
watch:
- lib
- internal
- cmd/executor
codeintel-executor:
<<: *executor_template
cmd: |
env TMPDIR="$HOME/.sourcegraph/indexer-temp" .bin/executor
env:
EXECUTOR_QUEUE_NAME: codeintel
  # If you want to use this, either start it with `sg run codeintel-executor-firecracker` or
  # modify `commandsets.codeintel` in your local `sg.config.overwrite.yaml`
codeintel-executor-firecracker:
<<: *executor_template
cmd: |
env TMPDIR="$HOME/.sourcegraph/codeintel-executor-temp" \
sudo --preserve-env=TMPDIR,EXECUTOR_QUEUE_NAME,EXECUTOR_FRONTEND_URL,EXECUTOR_FRONTEND_PASSWORD,EXECUTOR_USE_FIRECRACKER \
.bin/executor
env:
EXECUTOR_USE_FIRECRACKER: true
EXECUTOR_QUEUE_NAME: codeintel
codeintel-executor-kubernetes:
<<: *executor_kubernetes_template
env:
MANIFEST_PATH: ./cmd/executor/kubernetes/codeintel
batches-executor:
<<: *executor_template
cmd: |
env TMPDIR="$HOME/.sourcegraph/batches-executor-temp" .bin/executor
env:
EXECUTOR_QUEUE_NAME: batches
EXECUTOR_MAXIMUM_NUM_JOBS: 8
# If you want to use this, either start it with `sg run batches-executor-firecracker` or
# modify the `commandsets.batches` in your local `sg.config.overwrite.yaml`
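  #
  # A sketch of such an overwrite (the layout is assumed to mirror the `commandsets`
  # section defined later in this file, and list overrides may require restating the
  # full command list):
  #
  # commandsets:
  #   batches:
  #     commands:
  #       - ...the other batches commands...
  #       - batches-executor-firecracker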
batches-executor-firecracker:
<<: *executor_template
cmd: |
env TMPDIR="$HOME/.sourcegraph/batches-executor-temp" \
sudo --preserve-env=TMPDIR,EXECUTOR_QUEUE_NAME,EXECUTOR_FRONTEND_URL,EXECUTOR_FRONTEND_PASSWORD,EXECUTOR_USE_FIRECRACKER \
.bin/executor
env:
EXECUTOR_USE_FIRECRACKER: true
EXECUTOR_QUEUE_NAME: batches
batches-executor-kubernetes:
<<: *executor_kubernetes_template
env:
MANIFEST_PATH: ./cmd/executor/kubernetes/batches
  # This tool rebuilds the batcheshelper image whenever its source changes.
batcheshelper-builder:
# Nothing to run for this, we just want to re-run the install script every time.
cmd: exit 0
install: |
bazel run //cmd/batcheshelper:image_tarball
env:
IMAGE: batcheshelper:candidate
# TODO: This is required but should only be set on M1 Macs.
PLATFORM: linux/arm64
watch:
- cmd/batcheshelper
- lib/batches
continueWatchOnExit: true
multiqueue-executor:
<<: *executor_template
cmd: |
env TMPDIR="$HOME/.sourcegraph/multiqueue-executor-temp" .bin/executor
env:
EXECUTOR_QUEUE_NAME: ''
EXECUTOR_QUEUE_NAMES: 'codeintel,batches'
EXECUTOR_MAXIMUM_NUM_JOBS: 8
blobstore:
cmd: .bin/blobstore
install: |
# Ensure the old blobstore Docker container is not running
docker rm -f blobstore
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/blobstore github.com/sourcegraph/sourcegraph/cmd/blobstore
checkBinary: .bin/blobstore
watch:
- lib
- internal
- cmd/blobstore
env:
BLOBSTORE_DATA_DIR: $HOME/.sourcegraph-dev/data/blobstore-go
redis-postgres:
# Add the following overwrites to your sg.config.overwrite.yaml to use the docker-compose
# database:
#
# env:
# PGHOST: localhost
# PGPASSWORD: sourcegraph
# PGUSER: sourcegraph
#
# You could also add an overwrite to add `redis-postgres` to the relevant command set(s).
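    #
    # For illustration only (use whichever command set you actually run; the layout
    # is assumed to mirror the `commandsets` section later in this file, and list
    # overrides may require restating the full command list):
    #
    # commandsets:
    #   <your-commandset>:
    #     commands:
    #       - redis-postgres
    #       - ...the set's existing commands...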
description: Dockerized version of redis and postgres
cmd: docker-compose -f dev/redis-postgres.yml up $COMPOSE_ARGS
env:
COMPOSE_ARGS: --force-recreate
jaeger:
cmd: |
echo "Jaeger will be available on http://localhost:16686/-/debug/jaeger/search"
.bin/jaeger-all-in-one-${JAEGER_VERSION} --log-level ${JAEGER_LOG_LEVEL} \
--collector.otlp.enabled \
--collector.otlp.grpc.host-port=:${JAEGER_OTLP_GRPC_PORT} \
--collector.otlp.http.host-port=:${JAEGER_OTLP_HTTP_PORT}
install_func: installJaeger
env:
JAEGER_VERSION: 1.45.0
JAEGER_DISK: $HOME/.sourcegraph-dev/data/jaeger
JAEGER_LOG_LEVEL: error
COLLECTOR_OTLP_ENABLED: 'true'
JAEGER_OTLP_GRPC_PORT: 4320
JAEGER_OTLP_HTTP_PORT: 4321
QUERY_BASE_PATH: /-/debug/jaeger
grafana:
cmd: |
if [[ $(uname) == "Linux" ]]; then
        # Linux needs an extra arg to support host.docker.internal, which is how grafana connects
        # to the prometheus backend.
ADD_HOST_FLAG="--add-host=host.docker.internal:host-gateway"
# Docker users on Linux will generally be using direct user mapping, which
# means that they'll want the data in the volume mount to be owned by the
# same user as is running this script. Fortunately, the Grafana container
# doesn't really care what user it runs as, so long as it can write to
# /var/lib/grafana.
DOCKER_USER="--user=$UID"
fi
echo "Grafana: serving on http://localhost:${PORT}"
echo "Grafana: note that logs are piped to ${GRAFANA_LOG_FILE}"
docker run --rm ${DOCKER_USER} \
--name=${CONTAINER} \
--cpus=1 \
--memory=1g \
-p 0.0.0.0:3370:3370 ${ADD_HOST_FLAG} \
-v "${GRAFANA_DISK}":/var/lib/grafana \
-v "$(pwd)"/dev/grafana/all:/sg_config_grafana/provisioning/datasources \
grafana:candidate >"${GRAFANA_LOG_FILE}" 2>&1
install: |
mkdir -p "${GRAFANA_DISK}"
mkdir -p "$(dirname ${GRAFANA_LOG_FILE})"
docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER
bazel run //docker-images/grafana:image_tarball
env:
GRAFANA_DISK: $HOME/.sourcegraph-dev/data/grafana
# Log file location: since we log outside of the Docker container, we should
# log somewhere that's _not_ ~/.sourcegraph-dev/data/grafana, since that gets
# volume mounted into the container and therefore has its own ownership
# semantics.
# Now for the actual logging. Grafana's output gets sent to stdout and stderr.
# We want to capture that output, but because it's fairly noisy, don't want to
# display it in the normal case.
GRAFANA_LOG_FILE: $HOME/.sourcegraph-dev/logs/grafana/grafana.log
IMAGE: grafana:candidate
CONTAINER: grafana
PORT: 3370
# docker containers must access things via docker host on non-linux platforms
DOCKER_USER: ''
ADD_HOST_FLAG: ''
CACHE: false
prometheus:
cmd: |
if [[ $(uname) == "Linux" ]]; then
DOCKER_USER="--user=$UID"
# Frontend generally runs outside of Docker, so to access it we need to be
# able to access ports on the host. --net=host is a very dirty way of
# enabling this.
DOCKER_NET="--net=host"
SRC_FRONTEND_INTERNAL="localhost:3090"
fi
echo "Prometheus: serving on http://localhost:${PORT}"
echo "Prometheus: note that logs are piped to ${PROMETHEUS_LOG_FILE}"
docker run --rm ${DOCKER_NET} ${DOCKER_USER} \
--name=${CONTAINER} \
--cpus=1 \
--memory=4g \
-p 0.0.0.0:9090:9090 \
-v "${PROMETHEUS_DISK}":/prometheus \
-v "$(pwd)/${CONFIG_DIR}":/sg_prometheus_add_ons \
-e SRC_FRONTEND_INTERNAL="${SRC_FRONTEND_INTERNAL}" \
-e DISABLE_SOURCEGRAPH_CONFIG="${DISABLE_SOURCEGRAPH_CONFIG:-""}" \
-e DISABLE_ALERTMANAGER="${DISABLE_ALERTMANAGER:-""}" \
-e PROMETHEUS_ADDITIONAL_FLAGS="--web.enable-lifecycle --web.enable-admin-api" \
${IMAGE} >"${PROMETHEUS_LOG_FILE}" 2>&1
install: |
mkdir -p "${PROMETHEUS_DISK}"
mkdir -p "$(dirname ${PROMETHEUS_LOG_FILE})"
docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER
if [[ $(uname) == "Linux" ]]; then
PROM_TARGETS="dev/prometheus/linux/prometheus_targets.yml"
fi
cp ${PROM_TARGETS} "${CONFIG_DIR}"/prometheus_targets.yml
bazel run //docker-images/prometheus:image_tarball
env:
PROMETHEUS_DISK: $HOME/.sourcegraph-dev/data/prometheus
# See comment above for `grafana`
PROMETHEUS_LOG_FILE: $HOME/.sourcegraph-dev/logs/prometheus/prometheus.log
IMAGE: prometheus:candidate
CONTAINER: prometheus
PORT: 9090
CONFIG_DIR: docker-images/prometheus/config
DOCKER_USER: ''
DOCKER_NET: ''
PROM_TARGETS: dev/prometheus/all/prometheus_targets.yml
SRC_FRONTEND_INTERNAL: host.docker.internal:3090
ADD_HOST_FLAG: ''
DISABLE_SOURCEGRAPH_CONFIG: false
postgres_exporter:
cmd: |
if [[ $(uname) == "Linux" ]]; then
        # Linux needs an extra arg to support host.docker.internal, which is how the
        # exporter reaches Postgres running on the host.
ADD_HOST_FLAG="--add-host=host.docker.internal:host-gateway"
fi
# Use psql to read the effective values for PG* env vars (instead of, e.g., hardcoding the default
# values).