diff --git a/.dockerignore b/.dockerignore index 5cd47d71..e41bfc15 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,9 @@ **/.git **/.gitignore **/.DS_Store -**/*.tmp \ No newline at end of file +**/*.tmp + +.github +.dockerignore +Dockerfile +Dockerfile.* diff --git a/.github/workflows/anchore-analysis.yml b/.github/workflows/anchore-analysis.yml index 98370108..acdef9cd 100644 --- a/.github/workflows/anchore-analysis.yml +++ b/.github/workflows/anchore-analysis.yml @@ -21,17 +21,24 @@ jobs: Anchore-Build-Scan: runs-on: ubuntu-latest steps: - - name: Checkout the code - uses: actions/checkout@v4 - - name: Build the Docker image - run: docker build . --file Dockerfile --tag localbuild/testimage:latest - - name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled - uses: anchore/scan-action@v3 - with: - image-reference: "localbuild/testimage:latest" - dockerfile-path: "Dockerfile" - acs-report-enable: true - - name: Upload Anchore Scan Report - uses: github/codeql-action/upload-sarif@v1 - with: - sarif_file: results.sarif + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build the Docker image + uses: docker/build-push-action@v5 + with: + tags: localbuild/threagile:latest + file: Dockerfile.local + push: false + load: true + + - name: Scan image + uses: anchore/scan-action@v3 + with: + image: "localbuild/threagile:latest" + fail-build: false + + - name: Upload Anchore Scan Report + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a21762c5..de325f0c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -28,7 +28,7 @@ jobs: uses: actions/checkout@v4 with: # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. 
+ # a pull request then we can check out the head. fetch-depth: 2 - name: Install Go @@ -39,14 +39,14 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -60,4 +60,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/gosec-analysis.yml b/.github/workflows/gosec-analysis.yml new file mode 100644 index 00000000..d4926df1 --- /dev/null +++ b/.github/workflows/gosec-analysis.yml @@ -0,0 +1,24 @@ +name: Security Static Analysis + +on: + push: + branches: [master] + pull_request: + # The branches below must be a subset of the branches above + branches: [master] + schedule: + - cron: '0 0 1 * *' + +jobs: + analyze: + name: GoSec + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v4 + - name: Run Gosec + uses: securego/gosec@master + with: + args: ./... 
diff --git a/.gitignore b/.gitignore index 9f411951..f932bbcb 100644 --- a/.gitignore +++ b/.gitignore @@ -23,5 +23,11 @@ stats.json # Output of the go coverage tool, specifically when used with LiteIDE *.out +# build artifacts +/vendor/ +/bin/ +/out*/ + # IDE stuff -.idea/ \ No newline at end of file +/.idea/ +/config.json \ No newline at end of file diff --git a/.grype.yaml b/.grype.yaml new file mode 100644 index 00000000..56dfefe4 --- /dev/null +++ b/.grype.yaml @@ -0,0 +1,47 @@ +ignore: + - vulnerability: CVE-2023-42363 + reason: + busybox(1.36.1-r15) - no upstream fix available + busybox-binsh(1.36.1-r15) - no upstream fix available + - vulnerability: CVE-2023-42364 + reason: + busybox(1.36.1-r15) - no upstream fix available + busybox-binsh(1.36.1-r15) - no upstream fix available + - vulnerability: CVE-2023-42365 + reason: + busybox(1.36.1-r15) - no upstream fix available + busybox-binsh(1.36.1-r15) - no upstream fix available + - vulnerability: CVE-2023-42366 + reason: + busybox(1.36.1-r15) - no upstream fix available + busybox-binsh(1.36.1-r15) - no upstream fix available + - vulnerability: CVE-2014-9157 + reason: + graphviz(8.0.5-r1) - no upstream fix available + graphviz-libs(8.0.5-r1) - no upstream fix available + - vulnerability: CVE-2023-37769 + reason: pixman(0.42.2-r1) - no upstream fix available + - vulnerability: CVE-2023-43789 + reason: libxpm(3.5.16-r1) - no upstream fix available + - vulnerability: CVE-2023-42363 + reason: ssl_client(1.36.1-r5) - no upstream fix available + - vulnerability: CVE-2023-42364 + reason: ssl_client(1.36.1-r5) - no upstream fix available + - vulnerability: CVE-2023-42365 + reason: ssl_client(1.36.1-r5) - no upstream fix available + - vulnerability: CVE-2015-7313 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-3164 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-40745 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: 
CVE-2023-41175 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-6228 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-6277 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-52355 + reason: tiff( 4.5.1-r0) - no upstream fix available + - vulnerability: CVE-2023-52356 + reason: tiff( 4.5.1-r0) - no upstream fix available diff --git a/Dockerfile b/Dockerfile index ce3c034a..bd52e7c9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,11 @@ + + ###### ## Stage 1: Clone the Git repository ###### FROM alpine/git as clone WORKDIR /app + RUN git clone https://github.com/threagile/threagile.git @@ -12,76 +15,67 @@ RUN git clone https://github.com/threagile/threagile.git ## Stage 2: Build application with Go's build tools ###### FROM golang as build +WORKDIR /app + ENV GO111MODULE=on + # https://stackoverflow.com/questions/36279253/go-compiled-binary-wont-run-in-an-alpine-docker-container-on-ubuntu-host #ENV CGO_ENABLED=0 # cannot be set as otherwise plugins don't run -WORKDIR /app COPY --from=clone /app/threagile /app + RUN go version RUN go test ./... 
-RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o raa.so raa/raa/raa.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o dummy.so raa/dummy/dummy.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o demo-rule.so risks/custom/demo/demo-rule.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o threagile +RUN GOOS=linux go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o raa_calc cmd/raa/main.go +RUN GOOS=linux go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o raa_dummy cmd/raa_dummy/main.go +RUN GOOS=linux go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o risk_demo_rule cmd/risk_demo/main.go +RUN GOOS=linux go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o threagile # add the -race parameter to go build call in order to instrument with race condition detector: https://blog.golang.org/race-detector # NOTE: copy files with final name to send to final build -RUN cp /app/demo/example/threagile.yaml /app/demo/example/threagile-example-model.yaml && \ - cp /app/demo/stub/threagile.yaml /app/demo/stub/threagile-stub-model.yaml +RUN cp /app/demo/example/threagile.yaml /app/demo/example/threagile-example-model.yaml +RUN cp /app/demo/stub/threagile.yaml /app/demo/stub/threagile-stub-model.yaml -###### -## Stage 3: Copy needed files into desired folder structure -###### -FROM scratch AS files - -COPY --from=build --chown=1000:1000 \ - /app/threagile \ - /app/raa.so \ - /app/dummy.so \ - /app/demo-rule.so \ - /app/LICENSE.txt \ - /app/report/template/background.pdf \ - /app/support/openapi.yaml \ - /app/support/schema.json \ - /app/support/live-templates.txt \ - /app/support/render-data-asset-diagram.sh \ - 
/app/support/render-data-flow-diagram.sh \ - /app/demo/example/threagile-example-model.yaml \ - /app/demo/stub/threagile-stub-model.yaml \ - \ - /app/ -COPY --from=build --chown=1000:1000 /app/server /app/server ###### -## Stage 4: Make final small image +## Stage 3: Make final small image ###### -FROM alpine +FROM alpine as deploy +WORKDIR /app # label used in other scripts to filter LABEL type="threagile" # add certificates -RUN apk add --update --no-cache ca-certificates \ -# add graphviz, fonts \ - graphviz ttf-freefont \ -# https://stackoverflow.com/questions/66963068/docker-alpine-executable-binary-not-found-even-if-in-path \ - libc6-compat && \ +RUN apk add --update --no-cache ca-certificates +# add graphviz, fonts +RUN apk add --update --no-cache graphviz ttf-freefont +# https://stackoverflow.com/questions/66963068/docker-alpine-executable-binary-not-found-even-if-in-path +RUN apk add libc6-compat # https://stackoverflow.com/questions/34729748/installed-go-binary-not-found-in-path-on-alpine-linux-docker # RUN mkdir -p /lib64 && ln -s /lib/libc.musl-x86_64.so.1 /lib64/ld-linux-x86-64.so.2 # clean apk cache - rm -rf /var/cache/apk/* && \ -# create application and data directories - mkdir -p /app /data && \ - chown -R 1000:1000 /app /data +RUN rm -rf /var/cache/apk/* -COPY --from=files / / +RUN mkdir -p /app /data +RUN chown -R 1000:1000 /app /data + +COPY --from=build --chown=1000:1000 /app/threagile /app/ +COPY --from=build --chown=1000:1000 /app/raa_calc /app/ +COPY --from=build --chown=1000:1000 /app/raa_dummy /app/ +COPY --from=build --chown=1000:1000 /app/risk_demo_rule /app/ +COPY --from=build --chown=1000:1000 /app/LICENSE.txt /app/ +COPY --from=build --chown=1000:1000 /app/report/template/background.pdf /app/ +COPY --from=build --chown=1000:1000 /app/support/openapi.yaml /app/ +COPY --from=build --chown=1000:1000 /app/support/schema.json /app/ +COPY --from=build --chown=1000:1000 /app/support/live-templates.txt /app/ +COPY --from=build 
--chown=1000:1000 /app/demo/example/threagile-example-model.yaml /app/ +COPY --from=build --chown=1000:1000 /app/demo/stub/threagile-stub-model.yaml /app/ +COPY --from=build --chown=1000:1000 /app/server /app/server USER 1000:1000 -WORKDIR /app -ENV PATH=/app:$PATH \ - GIN_MODE=release +ENV PATH=/app:$PATH GIN_MODE=release ENTRYPOINT ["/app/threagile"] -CMD ["-help"] +CMD ["help"] diff --git a/Dockerfile.local b/Dockerfile.local index c0887604..6003a64c 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -1,79 +1,87 @@ -# Used for local manual test builds +# Used for local manual test builds as well as Anchore scans in github workflow ###### -## Stage 1: Clone the Git repository +## Stage 1: Build application with Go's build tools ###### -FROM alpine/git as clone -WORKDIR /app -#RUN git clone https://github.com/threagile/threagile.git -COPY . /app/threagile +FROM docker.io/library/golang:alpine as build +COPY . /app +WORKDIR /app +ARG GOOS=linux +ENV GO111MODULE=on +# download dependencies +RUN go mod download -###### -## Stage 2: Build application with Go's build tools -###### -FROM golang as build -ENV GO111MODULE=on -# https://stackoverflow.com/questions/36279253/go-compiled-binary-wont-run-in-an-alpine-docker-container-on-ubuntu-host -#ENV CGO_ENABLED=0 # cannot be set as otherwise plugins don't run -WORKDIR /app -COPY --from=clone /app/threagile /app +# Set build-time variables RUN go version RUN go test ./... 
-RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o raa.so raa/raa/raa.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o dummy.so raa/dummy/dummy.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -buildmode=plugin -o demo-rule.so risks/custom/demo/demo-rule.go -RUN GOOS=linux go build -a -trimpath -ldflags="-s -w -X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o threagile -# add the -race parameter to go build call in order to instrument with race condition detector: https://blog.golang.org/race-detector +# build binaries +RUN go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o raa_calc cmd/raa/main.go +RUN go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o raa_dummy cmd/raa_dummy/main.go +RUN go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o risk_demo_rule cmd/risk_demo/main.go +RUN go build -ldflags="-X main.buildTimestamp=$(date '+%Y%m%d%H%M%S')" -o threagile cmd/threagile/main.go +# add the -race parameter to go build call in order to instrument with race condition detector: https://blog.golang.org/race-detector +# NOTE: copy files with final name to send to final build + +# copy assets +RUN cp /app/demo/example/threagile.yaml /app/demo/example/threagile-example-model.yaml +RUN cp /app/demo/stub/threagile.yaml /app/demo/stub/threagile-stub-model.yaml ###### -## Stage 3: Make final small image +## Stage 2: Make final small image ###### -FROM alpine +#FROM alpine:edge as finalize +FROM alpine:latest as finalize # label used in other scripts to filter LABEL type="threagile" -# add certificates -RUN apk add ca-certificates -# add graphviz, fonts +# update vulnerable packages +RUN apk add libcrypto3=3.1.4-r5 +RUN apk add libssl3=3.1.4-r5 + +# add certificates, graphviz, fonts +RUN apk add --update --no-cache 
ca-certificates RUN apk add --update --no-cache graphviz ttf-freefont + # https://stackoverflow.com/questions/66963068/docker-alpine-executable-binary-not-found-even-if-in-path RUN apk add libc6-compat + # https://stackoverflow.com/questions/34729748/installed-go-binary-not-found-in-path-on-alpine-linux-docker # RUN mkdir -p /lib64 && ln -s /lib/libc.musl-x86_64.so.1 /lib64/ld-linux-x86-64.so.2 + # clean apk cache RUN rm -rf /var/cache/apk/* +# add non-privileged user WORKDIR /app - -COPY --from=build /app/threagile /app/threagile -COPY --from=build /app/raa.so /app/raa.so -COPY --from=build /app/dummy.so /app/dummy.so -COPY --from=build /app/demo-rule.so /app/demo-rule.so -COPY --from=build /app/LICENSE.txt /app/LICENSE.txt -COPY --from=build /app/report/template/background.pdf /app/background.pdf -COPY --from=build /app/support/openapi.yaml /app/openapi.yaml -COPY --from=build /app/support/schema.json /app/schema.json -COPY --from=build /app/support/live-templates.txt /app/live-templates.txt -COPY --from=build /app/support/render-data-asset-diagram.sh /app/render-data-asset-diagram.sh -COPY --from=build /app/support/render-data-flow-diagram.sh /app/render-data-flow-diagram.sh -COPY --from=build /app/server /app/server -COPY --from=build /app/demo/example/threagile.yaml /app/threagile-example-model.yaml -COPY --from=build /app/demo/stub/threagile.yaml /app/threagile-stub-model.yaml - -RUN mkdir /data - -RUN chown -R 1000:1000 /app /data -USER 1000:1000 +RUN adduser --disabled-password --gecos "" --home "$(pwd)" --no-create-home threagile + +RUN mkdir -p /app /data +RUN chown -R threagile:threagile /app /data + +USER threagile + +COPY --from=build --chown=threagile:threagile /app/threagile /app/ +COPY --from=build --chown=threagile:threagile /app/raa_calc /app/ +COPY --from=build --chown=threagile:threagile /app/raa_dummy /app/ +COPY --from=build --chown=threagile:threagile /app/risk_demo_rule /app/ +COPY --from=build --chown=threagile:threagile /app/LICENSE.txt 
/app/ +COPY --from=build --chown=threagile:threagile /app/report/template/background.pdf /app/ +COPY --from=build --chown=threagile:threagile /app/support/openapi.yaml /app/ +COPY --from=build --chown=threagile:threagile /app/support/schema.json /app/ +COPY --from=build --chown=threagile:threagile /app/support/live-templates.txt /app/ +COPY --from=build --chown=threagile:threagile /app/demo/example/threagile-example-model.yaml /app/ +COPY --from=build --chown=threagile:threagile /app/demo/stub/threagile-stub-model.yaml /app/ +COPY --from=build --chown=threagile:threagile /app/server /app/server ENV PATH=/app:$PATH ENV GIN_MODE=release ENTRYPOINT ["/app/threagile"] -CMD ["-help"] \ No newline at end of file +CMD ["help"] diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..c16cea52 --- /dev/null +++ b/Makefile @@ -0,0 +1,73 @@ +# Files and Folders +ASSET_DIR = $(HOME)/.threagile +BIN_DIR = $(HOME)/bin +ASSETS = \ + LICENSE.txt \ + report/template/background.pdf \ + support/openapi.yaml \ + support/schema.json \ + support/live-templates.txt \ + server +BIN = \ + raa_calc \ + raa_dummy \ + risk_demo_rule \ + threagile + +# Commands and Flags +GOFLAGS = -a -ldflags="-s -w -X main.buildTimestamp=$(shell date '+%Y%m%d%H%M%S')" +GO = env GO111MODULE=on go +MKDIR = mkdir -p +CP = cp -r +RM = rm -rf +GOSEC = /opt/homebrew/bin/gosec + +# Targets +.phony: all prep run_tests clean tidy install uninstall gosec + +default: all + +all: prep run_tests $(addprefix bin/,$(BIN)) + +prep: + @# env GO111MODULE=on go mod vendor + $(MKDIR) bin + +run_tests: + $(GO) test ./... 
+ +clean: + $(RM) bin vendor + +tidy: clean + $(RM) .DS_Store + $(RM) just-for-docker-build-?.txt + $(RM) data-asset-diagram.* data-flow-diagram.* + $(RM) report.pdf risks.xlsx tags.xlsx risks.json technical-assets.json stats.json + $(RM) *.exe *.exe~ *.dll *.so *.dylibc *.test *.out + +install: all + mkdir -p $(BIN_DIR) $(ASSET_DIR) + $(CP) $(addprefix bin/,$(BIN)) $(BIN_DIR) + $(CP) $(ASSETS) $(ASSET_DIR) + $(CP) demo/example/threagile.yaml $(ASSET_DIR)/threagile-example-model.yaml + $(CP) demo/stub/threagile.yaml $(ASSET_DIR)/threagile-stub-model.yaml + +uninstall: + $(RM) $(addprefix $(BIN_DIR)/,$(BIN)) + $(RM) $(ASSET_DIR) + +gosec: + $(GOSEC) ./... + +bin/raa_calc: cmd/raa/main.go + $(GO) build $(GOFLAGS) -o $@ $< + +bin/raa_dummy: cmd/raa_dummy/main.go + $(GO) build $(GOFLAGS) -o $@ $< + +bin/risk_demo_rule: cmd/risk_demo/main.go + $(GO) build $(GOFLAGS) -o $@ $< + +bin/threagile: cmd/threagile/main.go + $(GO) build $(GOFLAGS) -o $@ $< diff --git a/README.md b/README.md index d55bf470..f8683402 100644 --- a/README.md +++ b/README.md @@ -31,91 +31,71 @@ The easiest way to execute Threagile on the commandline is via its Docker contai Sourcecode: https://github.com/threagile License: Open-Source (MIT License) - Usage: threagile [options] - - - Options: - - -background string - background pdf file (default "background.pdf") - -create-editing-support - just create some editing support stuff in the output directory - -create-example-model - just create an example model named threagile-example-model.yaml in the output directory - -create-stub-model - just create a minimal stub model named threagile-stub-model.yaml in the output directory - -custom-risk-rules-plugins string - comma-separated list of plugins (.so shared object) file names with custom risk rules to load - -diagram-dpi int - DPI used to render: maximum is 240 (default 120) - -execute-model-macro string - Execute model macro (by ID) - -generate-data-asset-diagram - generate data asset diagram (default 
true) - -generate-data-flow-diagram - generate data-flow diagram (default true) - -generate-report-pdf - generate report pdf, including diagrams (default true) - -generate-risks-excel - generate risks excel (default true) - -generate-risks-json - generate risks json (default true) - -generate-stats-json - generate stats json (default true) - -generate-tags-excel - generate tags excel (default true) - -generate-technical-assets-json - generate technical assets json (default true) - -ignore-orphaned-risk-tracking - ignore orphaned risk tracking (just log them) not matching a concrete risk - -list-model-macros - print model macros - -list-risk-rules - print risk rules - -list-types - print type information (enum values to be used in models) - -model string - input model yaml file (default "threagile.yaml") - -output string - output directory (default ".") - -print-3rd-party-licenses - print 3rd-party license information - -print-license - print license information - -raa-plugin string - RAA calculation plugin (.so shared object) file name (default "raa.so") - -server int - start a server (instead of commandline execution) on the given port - -skip-risk-rules string - comma-separated list of risk rules (by their ID) to skip - -verbose - verbose output - -version - print version + Usage: + threagile [flags] + threagile [command] + + Available Commands: + create-editing-support Create editing support + create-example-model Create example threagile model + create-stub-model Create stub threagile model + execute-model-macro Execute model macro + explain-model-macros Explain model macros + explain-risk-rules Detailed explanation of all the risk rules + explain-types Print type information (enum values to be used in models) + help Help about any command + list-model-macros Print model macros + list-risk-rules Print available risk rules + list-types Print type information (enum values to be used in models) + print-license Print license information + server Run server + + 
Flags: + --app-dir string app folder (default "/app") + --background string background pdf file (default "background.pdf") + --bin-dir string binary folder location (default "/app") + --custom-risk-rules-plugin string comma-separated list of plugins file names with custom risk rules to load + --diagram-dpi int DPI used to render: maximum is 300 + --generate-data-asset-diagram generate data asset diagram (default true) + --generate-data-flow-diagram generate data flow diagram (default true) + --generate-report-pdf generate report pdf, including diagrams (default true) + --generate-risks-excel generate risks excel (default true) + --generate-risks-json generate risks json (default true) + --generate-stats-json generate stats json (default true) + --generate-tags-excel generate tags excel (default true) + --generate-technical-assets-json generate technical assets json (default true) + -h, --help help for threagile + --ignore-orphaned-risk-tracking ignore orphaned risk tracking (just log them) not matching a concrete risk + --model string input model yaml file (default "threagile.yaml") + --output string output directory (default ".") + --raa-run string RAA calculation run file name (default "raa_calc") + --skip-risk-rules string comma-separated list of risk rules (by their ID) to skip + --temp-dir string temporary folder location (default "/dev/shm") + -v, --verbose verbose output Examples: If you want to create an example model (via docker) as a starting point to learn about Threagile just run: - docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -create-example-model -output /app/work + docker run --rm -it -v "$(pwd)":/app/work threagile/threagile create-example-model -output /app/work If you want to create a minimal stub model (via docker) as a starting point for your own model just run: - docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -create-stub-model -output /app/work + docker run --rm -it -v "$(pwd)":/app/work threagile/threagile 
create-stub-model -output /app/work If you want to execute Threagile on a model yaml file (via docker): docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -verbose -model /app/work/threagile.yaml -output /app/work If you want to run Threagile as a server (REST API) on some port (here 8080): - docker run --rm -it --shm-size=256m -p 8080:8080 --name threagile-server --mount 'type=volume,src=threagile-storage,dst=/data,readonly=false' threagile/threagile -server 8080 + docker run --rm -it --shm-size=256m -p 8080:8080 --name threagile-server --mount 'type=volume,src=threagile-storage,dst=/data,readonly=false' threagile/threagile server --server-port 8080 If you want to find out about the different enum values usable in the model yaml file: - docker run --rm -it threagile/threagile -list-types + docker run --rm -it threagile/threagile list-types If you want to use some nice editing help (syntax validation, autocompletion, and live templates) in your favourite IDE: - docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -create-editing-support -output /app/work + docker run --rm -it -v "$(pwd)":/app/work threagile/threagile create-editing-support -output /app/work If you want to list all available model macros (which are macros capable of reading a model yaml file, asking you questions in a wizard-style and then update the model yaml file accordingly): - docker run --rm -it threagile/threagile -list-model-macros + docker run --rm -it threagile/threagile list-model-macros If you want to execute a certain model macro on the model yaml file (here the macro add-build-pipeline): - docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -model /app/work/threagile.yaml -output /app/work -execute-model-macro add-build-pipeline + docker run --rm -it -v "$(pwd)":/app/work threagile/threagile -model /app/work/threagile.yaml -output /app/work execute-model-macro add-build-pipeline diff --git a/raa/raa/raa.go b/cmd/raa/main.go similarity index 55% rename from 
raa/raa/raa.go rename to cmd/raa/main.go index a0b98aa3..6ff37d65 100644 --- a/raa/raa/raa.go +++ b/cmd/raa/main.go @@ -1,17 +1,86 @@ package main import ( - "github.com/threagile/threagile/model" + "encoding/json" + "flag" + "fmt" + "io" + "os" "sort" + + "github.com/threagile/threagile/pkg/security/types" ) -// used from plugin caller: -func CalculateRAA() string { - for techAssetID, techAsset := range model.ParsedModelRoot.TechnicalAssets { - aa := calculateAttackerAttractiveness(techAsset) - aa += calculatePivotingNeighbourEffectAdjustment(techAsset) - techAsset.RAA = calculateRelativeAttackerAttractiveness(aa) - model.ParsedModelRoot.TechnicalAssets[techAssetID] = techAsset +// used from run caller: + +func main() { + inputFilename := flag.String("in", "", "input file") + outputFilename := flag.String("out", "", "output file") + flag.Parse() + + var data []byte + var inputError error + if len(*inputFilename) > 0 { + data, inputError = os.ReadFile(*inputFilename) + if inputError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to read input file %q: %v\n", *inputFilename, inputError) + os.Exit(-2) + } + } else { + data, inputError = io.ReadAll(os.Stdin) + if inputError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to read input from stdin: %v\n", inputError) + os.Exit(-2) + } + } + + // _ = os.WriteFile("raa_in.json", data, 0644) + + var input types.ParsedModel + parseError := json.Unmarshal(data, &input) + if parseError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to parse model: %v\n", parseError) + os.Exit(-2) + } + + text := CalculateRAA(&input) + outData, marshalError := json.MarshalIndent(input, "", " ") + if marshalError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to print model: %v\n", marshalError) + os.Exit(-2) + } + + // _ = os.WriteFile("raa_out.json", outData, 0644) + + var outputFile io.Writer = os.Stdout + if len(*outputFilename) > 0 { + file, outputError := os.Open(*outputFilename) + if outputError != nil { + _, _ = 
fmt.Fprintf(os.Stderr, "failed to open output file %q: %v\n", *outputFilename, outputError) + os.Exit(-2) + } + + defer closeFile(file) + outputFile = file + } + + _, _ = fmt.Fprint(outputFile, string(outData)) + _ = text + // _, _ = fmt.Fprint(os.Stderr, text) + + os.Exit(0) +} + +func closeFile(file io.Closer) { + _ = file.Close() +} + +func CalculateRAA(input *types.ParsedModel) string { + for techAssetID, techAsset := range input.TechnicalAssets { + aa := calculateAttackerAttractiveness(input, techAsset) + aa += calculatePivotingNeighbourEffectAdjustment(input, techAsset) + techAsset.RAA = calculateRelativeAttackerAttractiveness(input, aa) + input.TechnicalAssets[techAssetID] = techAsset } // return intro text (for reporting etc., can be short summary-like) return "For each technical asset the \"Relative Attacker Attractiveness\" (RAA) value was calculated " + @@ -26,24 +95,24 @@ func CalculateRAA() string { var attackerAttractivenessMinimum, attackerAttractivenessMaximum, spread float64 = 0, 0, 0 // set the concrete value in relation to the minimum and maximum of all -func calculateRelativeAttackerAttractiveness(attractiveness float64) float64 { +func calculateRelativeAttackerAttractiveness(input *types.ParsedModel, attractiveness float64) float64 { if attackerAttractivenessMinimum == 0 || attackerAttractivenessMaximum == 0 { attackerAttractivenessMinimum, attackerAttractivenessMaximum = 9223372036854775807, -9223372036854775808 // determine (only one time required) the min/max of all // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order // range over them in sorted (hence re-producible) way: keys := make([]string, 0) - for k, _ := range model.ParsedModelRoot.TechnicalAssets { + for k := range input.TechnicalAssets { keys = append(keys, k) } sort.Strings(keys) for _, key := range keys { - techAsset := model.ParsedModelRoot.TechnicalAssets[key] - if 
calculateAttackerAttractiveness(techAsset) > attackerAttractivenessMaximum { - attackerAttractivenessMaximum = calculateAttackerAttractiveness(techAsset) + techAsset := input.TechnicalAssets[key] + if calculateAttackerAttractiveness(input, techAsset) > attackerAttractivenessMaximum { + attackerAttractivenessMaximum = calculateAttackerAttractiveness(input, techAsset) } - if calculateAttackerAttractiveness(techAsset) < attackerAttractivenessMinimum { - attackerAttractivenessMinimum = calculateAttackerAttractiveness(techAsset) + if calculateAttackerAttractiveness(input, techAsset) < attackerAttractivenessMinimum { + attackerAttractivenessMinimum = calculateAttackerAttractiveness(input, techAsset) } } if !(attackerAttractivenessMinimum < attackerAttractivenessMaximum) { @@ -53,7 +122,7 @@ func calculateRelativeAttackerAttractiveness(attractiveness float64) float64 { } // calculate the percent value of the value within the defined min/max range value := attractiveness - attackerAttractivenessMinimum - percent := float64(value) / float64(spread) * 100 + percent := value / spread * 100 if percent <= 0 { percent = 1 // since 0 suggests no attacks at all } @@ -61,18 +130,18 @@ func calculateRelativeAttackerAttractiveness(attractiveness float64) float64 { } // increase the RAA (relative attacker attractiveness) by one third (1/3) of the delta to the highest outgoing neighbour (if positive delta) -func calculatePivotingNeighbourEffectAdjustment(techAsset model.TechnicalAsset) float64 { +func calculatePivotingNeighbourEffectAdjustment(input *types.ParsedModel, techAsset types.TechnicalAsset) float64 { if techAsset.OutOfScope { return 0 } adjustment := 0.0 for _, commLink := range techAsset.CommunicationLinks { - outgoingNeighbour := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId] + outgoingNeighbour := input.TechnicalAssets[commLink.TargetId] //if outgoingNeighbour.getTrustBoundary() == techAsset.getTrustBoundary() { // same trust boundary - delta := 
calculateRelativeAttackerAttractiveness(calculateAttackerAttractiveness(outgoingNeighbour)) - calculateRelativeAttackerAttractiveness(calculateAttackerAttractiveness(techAsset)) + delta := calculateRelativeAttackerAttractiveness(input, calculateAttackerAttractiveness(input, outgoingNeighbour)) - calculateRelativeAttackerAttractiveness(input, calculateAttackerAttractiveness(input, techAsset)) if delta > 0 { potentialIncrease := delta / 3 - //fmt.Println("Positive delta from", techAsset.Id, "to", outgoingNeighbour.Id, "is", delta, "yields to pivoting eighbour effect of an incrase of", potentialIncrease) + //fmt.Println("Positive delta from", techAsset.Id, "to", outgoingNeighbour.Id, "is", delta, "yields to pivoting neighbour effect of an increase of", potentialIncrease) if potentialIncrease > adjustment { adjustment = potentialIncrease } @@ -84,7 +153,7 @@ func calculatePivotingNeighbourEffectAdjustment(techAsset model.TechnicalAsset) // The sum of all CIAs of the asset itself (fibonacci scale) plus the sum of the comm-links' transferred CIAs // Multiplied by the quantity values of the data asset for C and I (not A) -func calculateAttackerAttractiveness(techAsset model.TechnicalAsset) float64 { +func calculateAttackerAttractiveness(input *types.ParsedModel, techAsset types.TechnicalAsset) float64 { if techAsset.OutOfScope { return 0 } @@ -93,49 +162,51 @@ func calculateAttackerAttractiveness(techAsset model.TechnicalAsset) float64 { score += techAsset.Integrity.AttackerAttractivenessForAsset() score += techAsset.Availability.AttackerAttractivenessForAsset() for _, dataAssetProcessed := range techAsset.DataAssetsProcessed { - dataAsset := model.ParsedModelRoot.DataAssets[dataAssetProcessed] + dataAsset := input.DataAssets[dataAssetProcessed] score += dataAsset.Confidentiality.AttackerAttractivenessForProcessedOrStoredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Integrity.AttackerAttractivenessForProcessedOrStoredData() * 
dataAsset.Quantity.QuantityFactor() score += dataAsset.Availability.AttackerAttractivenessForProcessedOrStoredData() } + // NOTE: Assuming all stored data is also processed, this effectively scores stored data twice for _, dataAssetStored := range techAsset.DataAssetsStored { - dataAsset := model.ParsedModelRoot.DataAssets[dataAssetStored] + dataAsset := input.DataAssets[dataAssetStored] score += dataAsset.Confidentiality.AttackerAttractivenessForProcessedOrStoredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Integrity.AttackerAttractivenessForProcessedOrStoredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Availability.AttackerAttractivenessForProcessedOrStoredData() } + // NOTE: To send or receive data effectively is processing that data and it's questionable if the attractiveness increases further for _, dataFlow := range techAsset.CommunicationLinks { for _, dataAssetSent := range dataFlow.DataAssetsSent { - dataAsset := model.ParsedModelRoot.DataAssets[dataAssetSent] + dataAsset := input.DataAssets[dataAssetSent] score += dataAsset.Confidentiality.AttackerAttractivenessForInOutTransferredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Integrity.AttackerAttractivenessForInOutTransferredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Availability.AttackerAttractivenessForInOutTransferredData() } for _, dataAssetReceived := range dataFlow.DataAssetsReceived { - dataAsset := model.ParsedModelRoot.DataAssets[dataAssetReceived] + dataAsset := input.DataAssets[dataAssetReceived] score += dataAsset.Confidentiality.AttackerAttractivenessForInOutTransferredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Integrity.AttackerAttractivenessForInOutTransferredData() * dataAsset.Quantity.QuantityFactor() score += dataAsset.Availability.AttackerAttractivenessForInOutTransferredData() } } - if techAsset.Technology == model.LoadBalancer || techAsset.Technology == model.ReverseProxy { + if 
techAsset.Technology == types.LoadBalancer || techAsset.Technology == types.ReverseProxy { score = score / 5.5 } - if techAsset.Technology == model.Monitoring { + if techAsset.Technology == types.Monitoring { score = score / 5 } - if techAsset.Technology == model.ContainerPlatform { + if techAsset.Technology == types.ContainerPlatform { score = score * 5 } - if techAsset.Technology == model.Vault { + if techAsset.Technology == types.Vault { score = score * 2 } - if techAsset.Technology == model.BuildPipeline || techAsset.Technology == model.SourcecodeRepository || techAsset.Technology == model.ArtifactRegistry { + if techAsset.Technology == types.BuildPipeline || techAsset.Technology == types.SourcecodeRepository || techAsset.Technology == types.ArtifactRegistry { score = score * 2 } - if techAsset.Technology == model.IdentityProvider || techAsset.Technology == model.IdentityStoreDatabase || techAsset.Technology == model.IdentityStoreLDAP { + if techAsset.Technology == types.IdentityProvider || techAsset.Technology == types.IdentityStoreDatabase || techAsset.Technology == types.IdentityStoreLDAP { score = score * 2.5 - } else if techAsset.Type == model.Datastore { + } else if techAsset.Type == types.Datastore { score = score * 2 } if techAsset.MultiTenant { diff --git a/cmd/raa_dummy/main.go b/cmd/raa_dummy/main.go new file mode 100644 index 00000000..2e974b0e --- /dev/null +++ b/cmd/raa_dummy/main.go @@ -0,0 +1,59 @@ +package main + +import ( + "bufio" + "crypto/rand" + "encoding/json" + "fmt" + "github.com/threagile/threagile/pkg/security/types" + "io" + "math/big" + "os" + "time" +) + +// JUST A DUMMY TO HAVE AN ALTERNATIVE PLUGIN TO USE/TEST + +func main() { + reader := bufio.NewReader(os.Stdin) + + inData, outError := io.ReadAll(reader) + if outError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to read model data from stdin\n") + os.Exit(-2) + } + + var input types.ParsedModel + inError := json.Unmarshal(inData, &input) + if inError != nil { + _, _ = 
fmt.Fprintf(os.Stderr, "failed to parse model: %v\n", inError) + os.Exit(-2) + } + + text := CalculateRAA(&input) + outData, marshalError := json.Marshal(input) + if marshalError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to print model: %v\n", marshalError) + os.Exit(-2) + } + + _, _ = fmt.Fprint(os.Stdout, outData) + _, _ = fmt.Fprint(os.Stderr, text) + os.Exit(0) +} + +// used from run caller: + +func CalculateRAA(input *types.ParsedModel) string { + for techAssetID, techAsset := range input.TechnicalAssets { + nBig, randError := rand.Int(rand.Reader, big.NewInt(100)) + if randError != nil { + nBig.SetInt64(time.Now().UnixMilli()) + } + techAsset.RAA = float64(nBig.Int64()) + fmt.Println("Using dummy RAA random calculation (just to test the usage of other shared object files as plugins)") + input.TechnicalAssets[techAssetID] = techAsset + } + // return intro text (for reporting etc., can be short summary-like) + return "Just some dummy algorithm implementation for demo purposes of pluggability..." 
+} diff --git a/cmd/risk_demo/main.go b/cmd/risk_demo/main.go new file mode 100644 index 00000000..77471d32 --- /dev/null +++ b/cmd/risk_demo/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "io" + "os" + + "github.com/threagile/threagile/pkg/model" + "github.com/threagile/threagile/pkg/security/types" +) + +type customRiskRule string + +// exported as symbol (here simply as variable to interface to bundle many functions under one symbol) named "RiskRule" + +var CustomRiskRule customRiskRule + +func main() { + getInfo := flag.Bool("get-info", false, "get rule info") + generateRisks := flag.Bool("generate-risks", false, "generate risks") + flag.Parse() + + if *getInfo { + rule := new(customRiskRule) + category := rule.Category() + riskData, marshalError := json.Marshal(model.CustomRisk{ + ID: category.Id, + Category: category, + Tags: rule.SupportedTags(), + }) + + if marshalError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to print risk data: %v", marshalError) + os.Exit(-2) + } + + _, _ = fmt.Fprint(os.Stdout, riskData) + os.Exit(0) + } + + if *generateRisks { + reader := bufio.NewReader(os.Stdin) + inData, outError := io.ReadAll(reader) + if outError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to read model data from stdin\n") + os.Exit(-2) + } + + var input types.ParsedModel + inError := json.Unmarshal(inData, &input) + if inError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to parse model: %v\n", inError) + os.Exit(-2) + } + + generatedRisks := new(customRiskRule).GenerateRisks(&input) + outData, marshalError := json.Marshal(generatedRisks) + if marshalError != nil { + _, _ = fmt.Fprintf(os.Stderr, "failed to print generated risks: %v\n", marshalError) + os.Exit(-2) + } + + _, _ = fmt.Fprint(os.Stdout, outData) + os.Exit(0) + } + + flag.Usage() + os.Exit(-2) +} + +func (r customRiskRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "demo", + Title: "Just a Demo", + 
Description: "Demo Description", + Impact: "Demo Impact", + ASVS: "Demo ASVS", + CheatSheet: "https://example.com", + Action: "Demo Action", + Mitigation: "Demo Mitigation", + Check: "Demo Check", + Function: types.Development, + STRIDE: types.Tampering, + DetectionLogic: "Demo Detection", + RiskAssessment: "Demo Risk Assessment", + FalsePositives: "Demo False Positive.", + ModelFailurePossibleReason: false, + CWE: 0, + } +} + +func (r customRiskRule) SupportedTags() []string { + return []string{"demo tag"} +} + +func (r customRiskRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + generatedRisks := make([]types.Risk, 0) + for _, techAsset := range parsedModel.TechnicalAssets { + generatedRisks = append(generatedRisks, createRisk(techAsset)) + } + return generatedRisks +} + +func createRisk(technicalAsset types.TechnicalAsset) types.Risk { + risk := types.Risk{ + CategoryId: CustomRiskRule.Category().Id, + Severity: types.CalculateSeverity(types.VeryLikely, types.MediumImpact), + ExploitationLikelihood: types.VeryLikely, + ExploitationImpact: types.MediumImpact, + Title: "Demo risk at " + technicalAsset.Title + "", + MostRelevantTechnicalAssetId: technicalAsset.Id, + DataBreachProbability: types.Possible, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + return risk +} diff --git a/cmd/threagile/config.json b/cmd/threagile/config.json new file mode 100644 index 00000000..aa2f4ffe --- /dev/null +++ b/cmd/threagile/config.json @@ -0,0 +1,3 @@ +{ + "appFolder": "config-app-folder" +} diff --git a/cmd/threagile/main.go b/cmd/threagile/main.go new file mode 100644 index 00000000..d5bdc947 --- /dev/null +++ b/cmd/threagile/main.go @@ -0,0 +1,13 @@ +package main + +import ( + "github.com/threagile/threagile/internal/threagile" +) + +var ( + buildTimestamp = "" +) + +func main() { + new(threagile.Threagile).Init(buildTimestamp).Execute() +} diff --git 
a/cmd/threagile/main_test.go b/cmd/threagile/main_test.go new file mode 100644 index 00000000..a531c359 --- /dev/null +++ b/cmd/threagile/main_test.go @@ -0,0 +1,82 @@ +package main + +import ( + "encoding/json" + "fmt" + "github.com/akedrou/textdiff" + "github.com/threagile/threagile/pkg/input" + "os" + "path/filepath" + "sort" + "strings" + "testing" +) + +func TestParseModelYaml(t *testing.T) { + flatModelFile := filepath.Join("..", "..", "test", "all.yaml") + flatModel := *new(input.Model).Defaults() + flatLoadError := flatModel.Load(flatModelFile) + if flatLoadError != nil { + t.Errorf("unable to parse model yaml %q: %v", flatModelFile, flatLoadError) + return + } + + sort.Strings(flatModel.TagsAvailable) + flatModel.TagsAvailable = []string{strings.Join(flatModel.TagsAvailable, ", ")} + + flatData, flatMarshalError := json.MarshalIndent(flatModel, "", " ") + if flatMarshalError != nil { + t.Errorf("unable to print model yaml %q: %v", flatModelFile, flatMarshalError) + return + } + + splitModelFile := filepath.Join("..", "..", "test", "main.yaml") + splitModel := *new(input.Model).Defaults() + splitLoadError := splitModel.Load(splitModelFile) + if splitLoadError != nil { + t.Errorf("unable to parse model yaml %q: %v", splitModelFile, splitLoadError) + return + } + + sort.Strings(splitModel.TagsAvailable) + splitModel.TagsAvailable = []string{strings.Join(splitModel.TagsAvailable, ", ")} + + splitModel.Includes = flatModel.Includes + splitData, splitMarshalError := json.MarshalIndent(splitModel, "", " ") + if splitMarshalError != nil { + t.Errorf("unable to print model yaml %q: %v", splitModelFile, splitMarshalError) + return + } + + if string(flatData) != string(splitData) { + t.Errorf("parsing split model files is broken; diff: %v", textdiff.Unified(flatModelFile, splitModelFile, string(flatData), string(splitData))) + return + } +} + +func TestParseModelJson(t *testing.T) { + modelFile := filepath.Join("..", "..", "test", "all.yaml") + model := 
*new(input.Model).Defaults() + flatLoadError := model.Load(modelFile) + if flatLoadError != nil { + t.Errorf("unable to parse model yaml %q: %v", modelFile, flatLoadError) + return + } + + modelJson, marshalError := json.MarshalIndent(model, "", " ") + if marshalError != nil { + t.Error("Unable to print model json: ", marshalError) + return + } + + var modelStruct input.Model + unmarshalError := json.Unmarshal(modelJson, &modelStruct) + if unmarshalError != nil { + jsonFile := "test.json" + _ = os.WriteFile(jsonFile, modelJson, 0644) + fmt.Printf("Yaml file: %v\n", modelFile) + fmt.Printf("Json file: %v\n", jsonFile) + t.Error("Unable to parse model json: ", unmarshalError) + return + } +} diff --git a/cmd/threagile/threagile.yaml b/cmd/threagile/threagile.yaml new file mode 100644 index 00000000..77815f77 --- /dev/null +++ b/cmd/threagile/threagile.yaml @@ -0,0 +1,1354 @@ +threagile_version: 1.0.0 + +# NOTE: +# +# For a perfect editing experience within your IDE of choice you can easily +# get model syntax validation and autocompletion (very handy for enum values) +# as well as live templates: Just import the schema.json into your IDE and assign +# it as "schema" to each Threagile YAML file. Also try to import individual parts +# from the live-templates.txt file into your IDE as live editing templates. +# +# You might also want to try the REST API when running in server mode... + + + +title: Some Example Application + +date: 2020-07-01 + +author: + name: John Doe + homepage: www.example.com + + + + +management_summary_comment: > + Just some more custom summary possible here... + +business_criticality: important # values: archive, operational, important, critical, mission-critical + + + + +business_overview: + description: Some more demo text here and even images... + images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 + + +technical_overview: + description: Some more demo text here and even images... 
+ images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 + + + +questions: # simply use "" as answer to signal "unanswered" + How are the admin clients managed/protected against compromise?: "" + How are the development clients managed/protected against compromise?: > + Managed by XYZ + How are the build pipeline components managed/protected against compromise?: > + Managed by XYZ + + + +abuse_cases: + Denial-of-Service: > + As a hacker I want to disturb the functionality of the backend system in order to cause indirect + financial damage via unusable features. + CPU-Cycle Theft: > + As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners. + Ransomware: > + As a hacker I want to encrypt the storage and file systems in order to demand ransom. + Identity Theft: > + As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside. + PII Theft: > + As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage + their repudiation by publishing them. + + ERP-System Compromise: > + As a hacker I want to access the ERP-System in order to steal/modify sensitive business data. + Database Compromise: > + As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive + business data. + Contract Filesystem Compromise: > + As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data. + Cross-Site Scripting Attacks: > + As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and + cause reputational damage. + Denial-of-Service of Enduser Functionality: > + As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial + damage (lower sales). 
+ Denial-of-Service of ERP/DB Functionality: > + As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect + financial damage via unusable internal ERP features (not related to customer portal). + + +security_requirements: + Input Validation: Strict input validation is required to reduce the overall attack surface. + Securing Administrative Access: Administrative access must be secured with strong encryption and multi-factor authentication. + EU-DSGVO: Mandatory EU-Datenschutzgrundverordnung + + +# Tags can be used for anything, it's just a tag. Also risk rules can act based on tags if you like. +# Tags can be used for example to name the products used (which is more concrete than the technology types that only specify the type) +tags_available: + - linux + - apache + - mysql + - jboss + - keycloak + - jenkins + - git + - oracle + - some-erp + - vmware + - aws + - aws:ec2 + - aws:s3 + + + + +data_assets: + + + Customer Contracts: &customer-contracts # this example shows the inheritance-like features of YAML + id: customer-contracts + description: Customer Contracts (PDF) + usage: business # values: business, devops + tags: + origin: Customer + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). The integrity and + availability of contract data is required for clearing payment disputes. 
+ + + Customer Contract Summaries: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: contract-summaries + description: Customer Contract Summaries + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Just some summaries. + + + Customer Operational Data: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-operational-data + description: Customer Operational Data + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer operational data for using the portal are required to be available to offer the portal functionality + and are used in the backend transactions. + + + Customer Accounts: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-accounts + description: Customer Accounts (including transient credentials when entered for checking them) + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer account data for using the portal are required to be available to offer the portal functionality. + + + Some Internal Business Data: + id: internal-business-data + description: Internal business data of the ERP system used unrelated to the customer-facing processes. 
+ usage: business # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for + internal non-customer-portal-related stuff). + + + Client Application Code: &client-application-code # this example shows the inheritance-like features of YAML + id: client-application-code + description: Angular and other client-side code delivered by the application. + usage: devops # values: business, devops + tags: + origin: Company ABC + owner: Company ABC + quantity: very-few # values: very-few, few, many, very-many + confidentiality: public # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the public data is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Server Application Code: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: server-application-code + description: API and other server-side code of the application. 
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the API code is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Build Job Config: + id: build-job-config + description: Data for customizing of the build job system. + usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the build job system. + + + Marketing Material: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: marketing-material + description: Website and marketing data to inform potential customers and generate new leads. + integrity: important # values: archive, operational, important, critical, mission-critical + + + ERP Logs: + id: erp-logs + description: Logs generated by the ERP system. 
+ usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: archive # values: archive, operational, important, critical, mission-critical + availability: archive # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard + transactional logs. + + + ERP Customizing Data: + id: erp-customizing + description: Data for customizing of the ERP system. + usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the ERP system. + + + Database Customizing and Dumps: + id: db-dumps + description: Data for customizing of the DB system, which might include full database dumps. + usage: devops # values: business, devops + tags: + - oracle + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the DB system, which might include full database dumps. 
+ + + + + + +technical_assets: + + + Customer Web Client: + id: customer-client + description: Customer Web Client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by enduser customer + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Customer + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by the customer to access the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Customer Traffic: + target: load-balancer + description: Link to the load balancer + protocol: https # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backoffice Client: + id: backoffice-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backoffice client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by Company XYZ company + size: component # values: system, service, application, component + technology: desktop # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + 
confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer and use the system. + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-contracts + - internal-business-data + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Internal Access: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + - some-erp + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Marketing CMS Editing: + target: marketing-cms + description: Link to the CMS for editing content + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence 
of IDs to reference + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backend Admin Client: + id: backend-admin-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backend admin client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by ops provider + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Web Access: + target: erp-system + description: Link to the ERP system (Web) + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - erp-customizing + data_assets_received: # sequence of IDs to reference + - erp-logs + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + DB Update Access: + target: sql-database + description: Link to the database (JDBC tunneled via SSH) + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - db-dumps + data_assets_received: # sequence of IDs to reference + - db-dumps + - erp-logs + - customer-accounts + - customer-operational-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + User Management Access: + target: ldap-auth-server + description: Link to the LDAP auth server for managing users + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: 
+ vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Load Balancer: + id: load-balancer + #diagram_tweak_order: 50 # affects left to right positioning (only within a trust boundary) + description: Load Balancer (HA-Proxy) + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: load-balancer # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ + usages of the portal and ERP system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Web Application Traffic: + target: apache-webserver + description: Link to the web server + protocol: http # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + CMS Content Traffic: + target: marketing-cms + description: Link to the CMS server + protocol: http # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - marketing-material + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + + + Apache Webserver: + id: apache-webserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Apache Webserver hosting 
the API code and client-side code + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: web-server # values: see help + tags: + - linux + - apache + - aws:ec2 + internet: false + machine: container # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. 
+ multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - json + - file + communication_links: + ERP System Traffic: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + Auth Credential Check Traffic: + target: identity-provider + description: Link to the identity provider server + protocol: https # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + Identity Provider: + id: identity-provider + #diagram_tweak_order: 0 # affects left to right positioning (only 
within a trust boundary) + description: Identity provider server + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-provider # values: see help + tags: + - linux + - jboss + - keycloak + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + LDAP Credential Check Traffic: + target: ldap-auth-server + description: Link to the LDAP server + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + LDAP Auth Server: + id: ldap-auth-server + #diagram_tweak_order: 0 # 
affects left to right positioning (only within a trust boundary) + description: LDAP authentication server + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-store-ldap # values: see help + tags: + - linux + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: transparent # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - customer-accounts + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + + + Marketing CMS: + id: marketing-cms + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: CMS for the marketing content + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: cms # values: see help + tags: + - linux + internet: false + machine: container # values: physical, virtual, container, serverless + 
encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. + multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - marketing-material + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Auth Traffic: + target: ldap-auth-server + description: Link to the LDAP auth server + protocol: ldap # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + + + Backoffice ERP System: + id: erp-system + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: ERP system + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, 
service, application, component + technology: erp # values: see help + tags: + - linux + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other + Company XYZ internal processes. + multi_tenant: false + redundant: true + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - erp-customizing + data_assets_stored: # sequence of IDs to reference + - erp-logs + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - xml + - file + - serialization + communication_links: + Database Traffic: + target: sql-database + description: Link to the DB system + protocol: jdbc # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + 
#diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + NFS Filesystem Access: + target: contract-fileserver + description: Link to the file system + protocol: nfs # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-contracts + data_assets_received: # sequence of IDs to reference + - customer-contracts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Contract Fileserver: + id: contract-fileserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: NFS Filesystem for storing the contract PDFs + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: file-server # values: see help + tags: + - linux + - aws:s3 + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). 
The integrity and + availability of contract data is required for clearing payment disputes. The filesystem is also required to be available + for storing new contracts of freshly generated customers. + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + data_assets_stored: # sequence of IDs to reference + - customer-contracts + - contract-summaries + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + + + Customer Contract Database: + id: sql-database + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: The database behind the ERP system + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: database # values: see help + tags: + - linux + - mysql + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: data-with-symmetric-shared-key # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also + for other Company XYZ internal processes. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - db-dumps + data_assets_stored: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + + + External Development Client: + id: external-dev-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: External developer client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by external developers + size: system # values: system, service, application, component + technology: devops-client # values: see help + tags: + - linux + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: External Developers + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The clients used by external developers to create parts of the application code. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + Git-Repo Code Write Access: + target: git-repo + description: Link to the Git repo + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Git-Repo Web-UI Access: + target: git-repo + description: Link to the Git repo + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Jenkins Web-UI Access: + target: jenkins-buildserver + description: Link to the Jenkins build server + protocol: https # values: see help + authentication: credentials # 
values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - build-job-config + data_assets_received: # sequence of IDs to reference + - build-job-config + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Git Repository: + id: git-repo + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Git repository server + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: sourcecode-repository # values: see help + tags: + - linux + - git + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + + + Jenkins Buildserver: + id: jenkins-buildserver + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Jenkins buildserver + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: build-pipeline # values: see help + tags: + - linux + - jenkins + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk + of reputation damage and application update unavailability when the build pipeline is compromised. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + - serialization + communication_links: + Git Repo Code Read Access: + target: git-repo + description: Link to the Git repository server + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Application Deployment: + target: apache-webserver + description: Link to the Apache webserver + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + CMS Updates: + target: marketing-cms + description: Link to the CMS + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, 
client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence of IDs to reference + + + + + +trust_boundaries: + + + Web DMZ: + id: web-dmz + description: Web DMZ + type: network-cloud-security-group # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - apache-webserver + - marketing-cms + trust_boundaries_nested: # sequence of IDs to reference + + + ERP DMZ: + id: erp-dmz + description: ERP DMZ + type: network-cloud-security-group # values: see help + tags: + - some-erp + technical_assets_inside: # sequence of IDs to reference + - erp-system + - contract-fileserver + - sql-database + trust_boundaries_nested: # sequence of IDs to reference + + + Application Network: + id: application-network + description: Application Network + type: network-cloud-provider # values: see help + tags: + - aws + technical_assets_inside: # sequence of IDs to reference + - load-balancer + trust_boundaries_nested: # sequence of IDs to reference + - web-dmz + - erp-dmz + - auth-env + + + Auth Handling Environment: + id: auth-env + description: Auth Handling Environment + type: execution-environment # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - identity-provider + - ldap-auth-server + trust_boundaries_nested: # sequence of IDs to reference + + + Dev Network: + id: dev-network + description: Development Network + type: network-on-prem # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - jenkins-buildserver + - git-repo + - backend-admin-client + - backoffice-client + trust_boundaries_nested: # sequence of IDs to reference + + + + + +shared_runtimes: + + + WebApp and Backoffice Virtualization: + id: 
webapp-virtualization + description: WebApp Virtualization + tags: + - vmware + technical_assets_running: # sequence of IDs to reference + - apache-webserver + - marketing-cms + - erp-system + - contract-fileserver + - sql-database + + + + +individual_risk_categories: # used for adding custom manually identified risks + + + Some Individual Risk Example: + id: something-strange + description: Some text describing the risk category... + impact: Some text describing the impact... + asvs: V0 - Something Strange + cheat_sheet: https://example.com + action: Some text describing the action... + mitigation: Some text describing the mitigation... + check: Check if XYZ... + function: business-side # values: business-side, architecture, development, operations + stride: repudiation # values: spoofing, tampering, repudiation, information-disclosure, denial-of-service, elevation-of-privilege + detection_logic: Some text describing the detection logic... + risk_assessment: Some text describing the risk assessment... + false_positives: Some text describing the most common types of false positives... 
+ model_failure_possible_reason: false + cwe: 693 + risks_identified: + Example Individual Risk at Database: + severity: critical # values: low, medium, elevated, high, critical + exploitation_likelihood: likely # values: unlikely, likely, very-likely, frequent + exploitation_impact: medium # values: low, medium, high, very-high + data_breach_probability: probable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + - sql-database + most_relevant_data_asset: + most_relevant_technical_asset: sql-database + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + Example Individual Risk at Contract Filesystem: + severity: medium # values: low, medium, elevated, high, critical + exploitation_likelihood: frequent # values: unlikely, likely, very-likely, frequent + exploitation_impact: very-high # values: low, medium, high, very-high + data_breach_probability: improbable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + most_relevant_data_asset: + most_relevant_technical_asset: contract-fileserver + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + + + +# NOTE: +# For risk tracking each risk-id needs to be defined (the string with the @ sign in it). These unique risk IDs +# are visible in the PDF report (the small grey string under each risk), the Excel (column "ID"), as well as the JSON responses. +# Some risk IDs have only one @ sign in them, while others multiple. The idea is to allow for unique but still speaking IDs. +# Therefore each risk instance creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. +# Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. 
Best is to lookup the IDs +# to use in the created Excel file. Alternatively a model macro "seed-risk-tracking" is available that helps in initially +# seeding the risk tracking part here based on already identified and not yet handled risks. +risk_tracking: + + untrusted-deserialization@erp-system: # wildcards "*" between the @ characters are possible + status: accepted # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: Risk accepted as tolerable + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + ldap-injection@*@ldap-auth-server@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-5678 + date: 2020-01-05 + checked_by: John Doe + + unencrypted-asset@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + missing-authentication-second-factor@*@*@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + missing-hardening@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + dos-risky-access-across-trust-boundary@*@*@*: # wildcards "*" between the @ characters are possible + status: in-progress # values: 
unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures are being implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + + +#diagram_tweak_edge_layout: spline # values: spline, polyline, false, ortho (this suppresses edge labels), curved (this suppresses edge labels and can cause problems with edges) + +#diagram_tweak_suppress_edge_labels: true +#diagram_tweak_layout_left_to_right: true +#diagram_tweak_nodesep: 2 +#diagram_tweak_ranksep: 2 +#diagram_tweak_invisible_connections_between_assets: +# - tech-asset-source-id-A:tech-asset-target-id-B +# - tech-asset-source-id-C:tech-asset-target-id-D +#diagram_tweak_same_rank_assets: +# - tech-asset-source-id-E:tech-asset-target-id-F:tech-asset-source-id-G:tech-asset-target-id-H +# - tech-asset-source-id-M:tech-asset-target-id-N:tech-asset-source-id-O diff --git a/colors/colors.go b/colors/colors.go deleted file mode 100644 index 506e97a6..00000000 --- a/colors/colors.go +++ /dev/null @@ -1,163 +0,0 @@ -package colors - -import ( - "encoding/hex" - "github.com/jung-kurt/gofpdf" -) - -const Red, Amber, Green, Blue, DarkBlue, Black, Gray, LightGray, MiddleLightGray, MoreLightGray, VeryLightGray, ExtremeLightGray, Pink, LightPink = "#CC0000", "#AF780E", "#008000", "#000080", "#000060", "#000000", "#444444", "#666666", "#999999", "#D2D2D2", "#E5E5E5", "#F6F6F6", "#F987C5", "#FFE7EF" -const ExtremeLightBlue, OutOfScopeFancy, CustomDevelopedParts = "#DDFFFF", "#D5D7FF", "#FFFC97" -const LightBlue = "#77FFFF" -const Brown = "#8C4C17" - -func DarkenHexColor(hexString string) string { - colorBytes, _ := hex.DecodeString(hexString[1:]) - adjusted := make([]byte, 3) - for i := 0; i < 3; i++ { - if colorBytes[i] > 0x22 { - adjusted[i] = colorBytes[i] - 0x20 - } else { - adjusted[i] = 0x00 - } - } - return "#" + hex.EncodeToString(adjusted) -} - -func BrightenHexColor(hexString string) string { - colorBytes, _ := hex.DecodeString(hexString[1:]) 
- adjusted := make([]byte, 3) - for i := 0; i < 3; i++ { - if colorBytes[i] < 0xDD { - adjusted[i] = colorBytes[i] + 0x20 - } else { - adjusted[i] = 0xFF - } - } - return "#" + hex.EncodeToString(adjusted) -} - -func ColorCriticalRisk(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(255, 38, 0) -} -func RgbHexColorCriticalRisk() string { - return "#FF2600" -} - -func ColorHighRisk(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(160, 40, 30) -} -func RgbHexColorHighRisk() string { - return "#A0281E" -} - -func ColorElevatedRisk(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(255, 142, 0) -} -func RgbHexColorElevatedRisk() string { - return "#FF8E00" -} - -func ColorMediumRisk(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(200, 120, 50) -} -func RgbHexColorMediumRisk() string { - return "#C87832" -} - -func ColorLowRisk(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(35, 70, 95) -} -func RgbHexColorLowRisk() string { - return "#23465F" -} - -func ColorOutOfScope(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(127, 127, 127) -} -func RgbHexColorOutOfScope() string { - return "#7F7F7F" -} - -func ColorRiskStatusUnchecked(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(256, 0, 0) -} -func RgbHexColorRiskStatusUnchecked() string { - return "#FF0000" -} - -func ColorRiskStatusMitigated(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(0, 143, 0) -} -func RgbHexColorRiskStatusMitigated() string { - return "#008F00" -} - -func ColorRiskStatusInProgress(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(0, 0, 256) -} -func RgbHexColorRiskStatusInProgress() string { - return "#0000FF" -} - -func ColorRiskStatusAccepted(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(255, 64, 255) -} -func RgbHexColorRiskStatusAccepted() string { - return "#FF40FF" -} - -func ColorRiskStatusInDiscussion(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(256, 147, 0) -} -func RgbHexColorRiskStatusInDiscussion() string { - return "#FF9300" -} - -func ColorRiskStatusFalsePositive(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(102, 102, 102) -} -func RgbHexColorRiskStatusFalsePositive() string { - return 
"#666666" -} - -func ColorTwilight(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(58, 82, 200) -} -func RgbHexColorTwilight() string { - return "#3A52C8" -} - -func ColorBusiness(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(83, 27, 147) -} -func RgbHexColorBusiness() string { - return "#531B93" -} - -func ColorArchitecture(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(0, 84, 147) -} -func RgbHexColorArchitecture() string { - return "#005493" -} - -func ColorDevelopment(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(222, 146, 35) -} -func RgbHexColorDevelopment() string { - return "#DE9223" -} - -func ColorOperation(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(148, 127, 80) -} -func RgbHexColorOperation() string { - return "#947F50" -} - -func ColorModelFailure(pdf *gofpdf.Fpdf) { - pdf.SetTextColor(148, 82, 0) -} -func RgbHexColorModelFailure() string { - return "#945200" -} diff --git a/demo/stub/threagile.yaml b/demo/stub/threagile.yaml index 1e3539e5..287eacd8 100644 --- a/demo/stub/threagile.yaml +++ b/demo/stub/threagile.yaml @@ -12,7 +12,7 @@ threagile_version: 1.0.0 # This is only a stub for simple quick editing and is not complete. -# For a complete usable example model see the "-create-example-model" option. +# For a complete usable example model see the "create-example-model" option. 
title: Model Stub diff --git a/go.mod b/go.mod index 5a91844f..6c80e9ac 100644 --- a/go.mod +++ b/go.mod @@ -1,49 +1,64 @@ module github.com/threagile/threagile -go 1.21 +go 1.20 require ( github.com/gin-gonic/gin v1.9.1 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 github.com/jung-kurt/gofpdf v1.16.2 + github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de + github.com/spf13/pflag v1.0.5 github.com/wcharczuk/go-chart v2.0.1+incompatible github.com/xuri/excelize/v2 v2.8.0 - golang.org/x/crypto v0.14.0 + golang.org/x/crypto v0.18.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/blend/go-sdk v1.20220411.3 // indirect - github.com/bytedance/sonic v1.10.2 // indirect + github.com/buildkite/shellwords v0.0.0-20180315110454-59467a9b8e10 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.1 // indirect - github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.15.5 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/leodido/go-urn v1.2.4 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/leodido/go-urn v1.3.0 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // 
indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/phpdave11/gofpdi v1.0.13 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/richardlehane/mscfb v1.0.4 // indirect github.com/richardlehane/msoleps v1.0.3 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect +) + +require ( + github.com/akedrou/textdiff v0.0.0-20230423230343-2ebdcebdccc1 + github.com/blend/go-sdk v1.20220411.3 // indirect + github.com/bytedance/sonic v1.10.2 // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/go-playground/validator/v10 v10.17.0 // indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/phpdave11/gofpdi v1.0.13 // indirect + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.8.4 + github.com/ugorji/go/codec v1.2.12 // indirect github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 // indirect github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 // indirect - golang.org/x/arch v0.5.0 // indirect - golang.org/x/image v0.13.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/image v0.15.0 // indirect + golang.org/x/net v0.20.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect ) diff --git a/go.sum b/go.sum index 1211ed1e..b5317e53 100644 --- a/go.sum +++ b/go.sum @@ -1,25 +1,29 @@ +github.com/akedrou/textdiff v0.0.0-20230423230343-2ebdcebdccc1 
h1:XfKKiQL7irIGI7nfu4a6IKhrgUHvKwhH/AnuHgZy/+U= +github.com/akedrou/textdiff v0.0.0-20230423230343-2ebdcebdccc1/go.mod h1:PJwvxBpzqjdeomc0r8Hgc+xJC7k6z+k371tffCGXR2M= github.com/blend/go-sdk v1.20220411.3 h1:GFV4/FQX5UzXLPwWV03gP811pj7B8J2sbuq+GJQofXc= github.com/blend/go-sdk v1.20220411.3/go.mod h1:7lnH8fTi6U4i1fArEXRyOIY2E1X4MALg09qsQqY1+ak= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/buildkite/shellwords v0.0.0-20180315110454-59467a9b8e10 h1:XwHQ5xDtYPdtBbVPyRO6UZoWZe8/mbKUb076f8x7RvI= +github.com/buildkite/shellwords v0.0.0-20180315110454-59467a9b8e10/go.mod h1:gv0DYOzHEsKgo31lTCDGauIg4DTTGn41Bzp+t3wSOlk= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= github.com/chenzhuoyu/iasm 
v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= @@ -27,42 +31,44 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= -github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= 
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= -github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.16.0 h1:x+plE831WK4vaKHO/jpgUGsvLKIqRRkz6M78GuJAfGE= +github.com/go-playground/validator/v10 v10.16.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= +github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5PtRc= github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= +github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -70,15 +76,13 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/phpdave11/gofpdi v1.0.7 h1:k2oy4yhkQopCK+qW8KjCla0iU2RpDow+QUDmH9DDt44= +github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto= +github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod 
h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13 h1:o61duiW8M9sMlkVXWlvP92sZJtGKENvW3VExs6dZukQ= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -89,7 +93,12 @@ github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7 github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= github.com/richardlehane/msoleps v1.0.3 h1:aznSZzrwYRl3rLKRT3gUk9am7T/mLNSnJINvN0AQoVM= github.com/richardlehane/msoleps v1.0.3/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -100,41 +109,35 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/wcharczuk/go-chart v2.0.1+incompatible h1:0pz39ZAycJFF7ju/1mepnk26RLVLBCWz1STcD3doU0A= github.com/wcharczuk/go-chart v2.0.1+incompatible/go.mod h1:PF5tmL4EIx/7Wf+hEkpCqYi5He4u90sw+0+6FhrryuE= -github.com/xuri/efp v0.0.0-20230802181842-ad255f2331ca h1:uvPMDVyP7PXMMioYdyPH+0O+Ta/UO1WFfNYMO3Wz0eg= github.com/xuri/efp v0.0.0-20230802181842-ad255f2331ca/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI= github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 h1:Chd9DkqERQQuHpXjR/HSV1jLZA6uaoiwwH3vSuF3IW0= github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI= github.com/xuri/excelize/v2 v2.8.0 
h1:Vd4Qy809fupgp1v7X+nCS/MioeQmYVVzi495UCTqB7U= github.com/xuri/excelize/v2 v2.8.0/go.mod h1:6iA2edBTKxKbZAa7X5bDhcCg51xdOn1Ar5sfoXRGrQg= -github.com/xuri/nfp v0.0.0-20230819163627-dc951e3ffe1a h1:Mw2VNrNNNjDtw68VsEj2+st+oCSn4Uz7vZw6TbhcV1o= github.com/xuri/nfp v0.0.0-20230819163627-dc951e3ffe1a/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ= github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 h1:qhbILQo1K3mphbwKh1vNm4oGezE1eF9fQWmNiIpSfI4= github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= -golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.11.0 
h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo= golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8= -golang.org/x/image v0.13.0 h1:3cge/F/QTkNLauhf2QoE9zp+7sr+ZcL4HnoZmdwg9sg= -golang.org/x/image v0.13.0/go.mod h1:6mmbMOeV28HuMTgA6OSRkdXKYw/t5W9Uwn2Yv1r3Yxk= +golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8= +golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -142,25 +145,24 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -172,21 +174,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text 
v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git 
a/internal/threagile/about.go b/internal/threagile/about.go new file mode 100644 index 00000000..97c45f44 --- /dev/null +++ b/internal/threagile/about.go @@ -0,0 +1,59 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "errors" + "fmt" + "github.com/threagile/threagile/pkg/common" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/threagile/threagile/pkg/docs" +) + +func (what *Threagile) initAbout() *Threagile { + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.PrintVersionCommand, + Short: "Get version information", + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp), + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.Print3rdPartyCommand, + Short: "Print 3rd-party license information", + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp) + "\n\n" + docs.ThirdPartyLicenses, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.PrintLicenseCommand, + Short: "Print license information", + RunE: func(cmd *cobra.Command, args []string) error { + appDir, err := cmd.Flags().GetString(appDirFlagName) + if err != nil { + cmd.Printf("Unable to read app-dir flag: %v", err) + return err + } + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + if appDir != filepath.Clean(appDir) { + // TODO: do we need this check here? 
+ cmd.Printf("weird app folder %v", appDir) + return errors.New("weird app folder") + } + content, err := os.ReadFile(filepath.Clean(filepath.Join(appDir, "LICENSE.txt"))) + if err != nil { + cmd.Printf("Unable to read license file: %v", err) + return err + } + cmd.Print(string(content)) + cmd.Println() + return nil + }, + }) + + return what +} diff --git a/internal/threagile/analyze.go b/internal/threagile/analyze.go new file mode 100644 index 00000000..e3ed3b43 --- /dev/null +++ b/internal/threagile/analyze.go @@ -0,0 +1,41 @@ +package threagile + +import ( + "github.com/spf13/cobra" + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/model" + "github.com/threagile/threagile/pkg/report" +) + +func (what *Threagile) initAnalyze() *Threagile { + analyze := &cobra.Command{ + Use: common.AnalyzeModelCommand, + Short: "Analyze model", + Aliases: []string{"analyze", "analyse", "run", "analyse-model"}, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := what.readConfig(cmd, what.buildTimestamp) + commands := what.readCommands() + progressReporter := common.DefaultProgressReporter{Verbose: cfg.Verbose} + + r, err := model.ReadAndAnalyzeModel(*cfg, progressReporter) + if err != nil { + cmd.Printf("Failed to read and analyze model: %v", err) + return err + } + + err = report.Generate(cfg, r, commands, progressReporter) + if err != nil { + cmd.Printf("Failed to generate reports: %v \n", err) + return err + } + return nil + }, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + }, + } + + what.rootCmd.AddCommand(analyze) + + return what +} diff --git a/internal/threagile/examples.go b/internal/threagile/examples.go new file mode 100644 index 00000000..4b48486e --- /dev/null +++ b/internal/threagile/examples.go @@ -0,0 +1,115 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "fmt" + "github.com/spf13/cobra" + "github.com/threagile/threagile/pkg/common" + 
"github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/examples" +) + +func (what *Threagile) initExamples() *Threagile { + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.CreateExampleModelCommand, + Short: "Create example threagile model", + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp) + "\n\njust create an example model named threagile-example-model.yaml in the output directory", + RunE: func(cmd *cobra.Command, args []string) error { + appDir, err := cmd.Flags().GetString(appDirFlagName) + if err != nil { + cmd.Printf("Unable to read app-dir flag: %v", err) + return err + } + outDir, err := cmd.Flags().GetString(outputFlagName) + if err != nil { + cmd.Printf("Unable to read output flag: %v", err) + return err + } + + err = examples.CreateExampleModelFile(appDir, outDir) + if err != nil { + cmd.Printf("Unable to copy example model: %v", err) + return err + } + + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("An example model was created named threagile-example-model.yaml in the output directory.") + cmd.Println() + cmd.Println(docs.Examples) + cmd.Println() + return nil + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.CreateStubModelCommand, + Short: "Create stub threagile model", + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp) + "\n\njust create a minimal stub model named threagile-stub-model.yaml in the output directory", + RunE: func(cmd *cobra.Command, args []string) error { + appDir, err := cmd.Flags().GetString(appDirFlagName) + if err != nil { + cmd.Printf("Unable to read app-dir flag: %v", err) + return err + } + outDir, err := cmd.Flags().GetString(outputFlagName) + if err != nil { + cmd.Printf("Unable to read output flag: %v", err) + return err + } + + err = examples.CreateStubModelFile(appDir, outDir) + if err != nil { + cmd.Printf("Unable to copy stub model: %v", err) + 
return err + } + + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("A minimal stub model was created named threagile-stub-model.yaml in the output directory.") + cmd.Println() + cmd.Println(docs.Examples) + cmd.Println() + return nil + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.CreateEditingSupportCommand, + Short: "Create editing support", + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp) + "\n\njust create some editing support stuff in the output directory", + RunE: func(cmd *cobra.Command, args []string) error { + appDir, err := cmd.Flags().GetString(appDirFlagName) + if err != nil { + cmd.Printf("Unable to read app-dir flag: %v", err) + return err + } + outDir, err := cmd.Flags().GetString(outputFlagName) + if err != nil { + cmd.Printf("Unable to read output flag: %v", err) + return err + } + + err = examples.CreateEditingSupportFiles(appDir, outDir) + if err != nil { + cmd.Printf("Unable to copy editing support files: %v", err) + return err + } + + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("The following files were created in the output directory:") + cmd.Println(" - schema.json") + cmd.Println(" - live-templates.txt") + cmd.Println() + cmd.Println("For a perfect editing experience within your IDE of choice you can easily get " + + "model syntax validation and autocompletion (very handy for enum values) as well as live templates: " + + "Just import the schema.json into your IDE and assign it as \"schema\" to each Threagile YAML file. 
" + + "Also try to import individual parts from the live-templates.txt file into your IDE as live editing templates.") + cmd.Println() + return nil + }, + }) + + return what +} diff --git a/internal/threagile/flags.go b/internal/threagile/flags.go new file mode 100644 index 00000000..42554c36 --- /dev/null +++ b/internal/threagile/flags.go @@ -0,0 +1,70 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +const ( + configFlagName = "config" + + interactiveFlagName = "interactive" + interactiveFlagShorthand = "i" + + verboseFlagName = "verbose" + verboseFlagShorthand = "v" + + appDirFlagName = "app-dir" + binDirFlagName = "bin-dir" + outputFlagName = "output" + tempDirFlagName = "temp-dir" + + serverDirFlagName = "server-dir" + serverPortFlagName = "server-port" + + inputFileFlagName = "model" + raaPluginFlagName = "raa-run" + + customRiskRulesPluginFlagName = "custom-risk-rules-plugin" + diagramDpiFlagName = "diagram-dpi" + skipRiskRulesFlagName = "skip-risk-rules" + ignoreOrphanedRiskTrackingFlagName = "ignore-orphaned-risk-tracking" + templateFileNameFlagName = "background" + + generateDataFlowDiagramFlagName = "generate-data-flow-diagram" + generateDataAssetDiagramFlagName = "generate-data-asset-diagram" + generateRisksJSONFlagName = "generate-risks-json" + generateTechnicalAssetsJSONFlagName = "generate-technical-assets-json" + generateStatsJSONFlagName = "generate-stats-json" + generateRisksExcelFlagName = "generate-risks-excel" + generateTagsExcelFlagName = "generate-tags-excel" + generateReportPDFFlagName = "generate-report-pdf" +) + +type Flags struct { + configFlag string + verboseFlag bool + interactiveFlag bool + appDirFlag string + binDirFlag string + outputDirFlag string + tempDirFlag string + inputFileFlag string + raaPluginFlag string + serverPortFlag int + serverDirFlag string + + skipRiskRulesFlag string + customRiskRulesPluginFlag string + ignoreOrphanedRiskTrackingFlag bool + templateFileNameFlag string + diagramDpiFlag int + + 
generateDataFlowDiagramFlag bool + generateDataAssetDiagramFlag bool + generateRisksJSONFlag bool + generateTechnicalAssetsJSONFlag bool + generateStatsJSONFlag bool + generateRisksExcelFlag bool + generateTagsExcelFlag bool + generateReportPDFFlag bool +} diff --git a/internal/threagile/macros.go b/internal/threagile/macros.go new file mode 100644 index 00000000..439d9bc9 --- /dev/null +++ b/internal/threagile/macros.go @@ -0,0 +1,95 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/macros" + "github.com/threagile/threagile/pkg/model" +) + +func (what *Threagile) initMacros() *Threagile { + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.ListModelMacrosCommand, + Short: "Print model macros", + Run: func(cmd *cobra.Command, args []string) { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("The following model macros are available (can be extended via custom model macros):") + cmd.Println() + /* TODO finish plugin stuff + cmd.Println("Custom model macros:") + for _, macros := range macros.ListCustomMacros() { + details := macros.GetMacroDetails() + cmd.Println(details.ID, "-->", details.Title) + } + cmd.Println() + */ + cmd.Println("----------------------") + cmd.Println("Built-in model macros:") + cmd.Println("----------------------") + for _, macros := range macros.ListBuiltInMacros() { + details := macros.GetMacroDetails() + cmd.Println(details.ID, "-->", details.Title) + } + cmd.Println() + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.ExplainModelMacrosCommand, + Short: "Explain model macros", + Run: func(cmd *cobra.Command, args []string) { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("Explanation for the model macros:") + cmd.Println() + /* 
TODO finish plugin stuff + cmd.Println("Custom model macros:") + for _, macros := range macros.ListCustomMacros() { + details := macros.GetMacroDetails() + cmd.Println(details.ID, "-->", details.Title) + } + cmd.Println() + */ + cmd.Println("----------------------") + cmd.Println("Built-in model macros:") + cmd.Println("----------------------") + for _, macros := range macros.ListBuiltInMacros() { + details := macros.GetMacroDetails() + cmd.Printf("%v: %v\n", details.ID, details.Title) + } + + cmd.Println() + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: "execute-model-macro", + Short: "Execute model macro", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cfg := what.readConfig(cmd, what.buildTimestamp) + progressReporter := common.DefaultProgressReporter{Verbose: cfg.Verbose} + + r, err := model.ReadAndAnalyzeModel(*cfg, progressReporter) + if err != nil { + return fmt.Errorf("unable to read and analyze model: %v", err) + } + + macrosId := args[0] + err = macros.ExecuteModelMacro(r.ModelInput, cfg.InputFile, r.ParsedModel, macrosId) + if err != nil { + return fmt.Errorf("unable to execute model macro: %v", err) + } + return nil + }, + }) + + return what +} diff --git a/internal/threagile/quit.go b/internal/threagile/quit.go new file mode 100644 index 00000000..44aa8655 --- /dev/null +++ b/internal/threagile/quit.go @@ -0,0 +1,25 @@ +package threagile + +import ( + "github.com/spf13/cobra" + "github.com/threagile/threagile/pkg/common" + "os" +) + +func (what *Threagile) initQuit() *Threagile { + analyze := &cobra.Command{ + Use: common.QuitCommand, + Short: "quit client", + Aliases: []string{"exit", "bye", "x", "q"}, + Run: func(cmd *cobra.Command, args []string) { + os.Exit(0) + }, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + }, + } + + what.rootCmd.AddCommand(analyze) + + return what +} diff --git a/internal/threagile/root.go b/internal/threagile/root.go new file mode 100644 index 
00000000..6b123bb4 --- /dev/null +++ b/internal/threagile/root.go @@ -0,0 +1,280 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "fmt" + "github.com/chzyer/readline" + "github.com/mattn/go-shellwords" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/report" +) + +const ( + UsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}} +` +) + +func (what *Threagile) initRoot() *Threagile { + what.rootCmd = &cobra.Command{ + Use: "threagile", + Version: docs.ThreagileVersion, + Short: "\n" + docs.Logo, + Long: "\n" + docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp) + "\n\n" + docs.Examples, + SilenceErrors: true, + SilenceUsage: true, + Run: what.run, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + }, + } + + defaultConfig := new(common.Config).Defaults(what.buildTimestamp) + + what.rootCmd.PersistentFlags().StringVar(&what.flags.appDirFlag, appDirFlagName, defaultConfig.AppFolder, "app folder") + what.rootCmd.PersistentFlags().StringVar(&what.flags.binDirFlag, binDirFlagName, defaultConfig.BinFolder, "binary folder location") + what.rootCmd.PersistentFlags().StringVar(&what.flags.outputDirFlag, outputFlagName, defaultConfig.OutputFolder, "output directory") + 
what.rootCmd.PersistentFlags().StringVar(&what.flags.tempDirFlag, tempDirFlagName, defaultConfig.TempFolder, "temporary folder location") + + what.rootCmd.PersistentFlags().StringVar(&what.flags.inputFileFlag, inputFileFlagName, defaultConfig.InputFile, "input model yaml file") + what.rootCmd.PersistentFlags().StringVar(&what.flags.raaPluginFlag, raaPluginFlagName, defaultConfig.RAAPlugin, "RAA calculation run file name") + + what.rootCmd.PersistentFlags().BoolVarP(&what.flags.interactiveFlag, interactiveFlagName, interactiveFlagShorthand, defaultConfig.Interactive, "interactive mode") + what.rootCmd.PersistentFlags().BoolVarP(&what.flags.verboseFlag, verboseFlagName, verboseFlagShorthand, defaultConfig.Verbose, "verbose output") + + what.rootCmd.PersistentFlags().StringVar(&what.flags.configFlag, configFlagName, "", "config file") + + what.rootCmd.PersistentFlags().StringVar(&what.flags.customRiskRulesPluginFlag, customRiskRulesPluginFlagName, strings.Join(defaultConfig.RiskRulesPlugins, ","), "comma-separated list of plugins file names with custom risk rules to load") + what.rootCmd.PersistentFlags().IntVar(&what.flags.diagramDpiFlag, diagramDpiFlagName, defaultConfig.DiagramDPI, "DPI used to render: maximum is "+fmt.Sprintf("%d", common.MaxGraphvizDPI)+"") + what.rootCmd.PersistentFlags().StringVar(&what.flags.skipRiskRulesFlag, skipRiskRulesFlagName, defaultConfig.SkipRiskRules, "comma-separated list of risk rules (by their ID) to skip") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.ignoreOrphanedRiskTrackingFlag, ignoreOrphanedRiskTrackingFlagName, defaultConfig.IgnoreOrphanedRiskTracking, "ignore orphaned risk tracking (just log them) not matching a concrete risk") + what.rootCmd.PersistentFlags().StringVar(&what.flags.templateFileNameFlag, templateFileNameFlagName, defaultConfig.TemplateFilename, "background pdf file") + + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateDataFlowDiagramFlag, generateDataFlowDiagramFlagName, true, "generate 
data flow diagram") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateDataAssetDiagramFlag, generateDataAssetDiagramFlagName, true, "generate data asset diagram") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateRisksJSONFlag, generateRisksJSONFlagName, true, "generate risks json") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateTechnicalAssetsJSONFlag, generateTechnicalAssetsJSONFlagName, true, "generate technical assets json") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateStatsJSONFlag, generateStatsJSONFlagName, true, "generate stats json") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateRisksExcelFlag, generateRisksExcelFlagName, true, "generate risks excel") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateTagsExcelFlag, generateTagsExcelFlagName, true, "generate tags excel") + what.rootCmd.PersistentFlags().BoolVar(&what.flags.generateReportPDFFlag, generateReportPDFFlagName, true, "generate report pdf, including diagrams") + + return what +} + +func (what *Threagile) run(*cobra.Command, []string) { + if !what.flags.interactiveFlag { + return + } + + what.rootCmd.Use = "\b" + completer := readline.NewPrefixCompleter() + for _, child := range what.rootCmd.Commands() { + what.cobraToReadline(completer, child) + } + + dir, homeError := os.UserHomeDir() + if homeError != nil { + return + } + + shell, readlineError := readline.NewEx(&readline.Config{ + Prompt: "\033[31m>>\033[0m ", + HistoryFile: filepath.Join(dir, ".threagile_history"), + HistoryLimit: 1000, + AutoComplete: completer, + InterruptPrompt: "^C", + EOFPrompt: "quit", + HistorySearchFold: true, + }) + + if readlineError != nil { + return + } + + defer func() { _ = shell.Close() }() + + for { + line, readError := shell.Readline() + if readError != nil { + return + } + + if len(strings.TrimSpace(line)) == 0 { + continue + } + + params, parseError := shellwords.Parse(line) + if parseError != nil { + fmt.Printf("failed to parse 
command line: %s", parseError.Error()) + continue + } + + cmd, args, findError := what.rootCmd.Find(params) + if findError != nil { + fmt.Printf("failed to find command: %s", findError.Error()) + continue + } + + if cmd == nil || cmd == what.rootCmd { + fmt.Printf("failed to find command") + continue + } + + flagsError := cmd.ParseFlags(args) + if flagsError != nil { + fmt.Printf("invalid flags: %s", flagsError.Error()) + continue + } + + if !cmd.DisableFlagParsing { + args = cmd.Flags().Args() + } + + argsError := cmd.ValidateArgs(args) + if argsError != nil { + _ = cmd.Help() + continue + } + + if cmd.Run != nil { + cmd.Run(cmd, args) + continue + } + + if cmd.RunE != nil { + runError := cmd.RunE(cmd, args) + if runError != nil { + fmt.Printf("error: %v \n", runError) + } + continue + } + + _ = cmd.Help() + continue + } +} + +func (c *Threagile) cobraToReadline(node readline.PrefixCompleterInterface, cmd *cobra.Command) { + cmd.SetUsageTemplate(UsageTemplate) + cmd.Use = c.usage(cmd) + pcItem := readline.PcItem(cmd.Use) + node.SetChildren(append(node.GetChildren(), pcItem)) + + for _, child := range cmd.Commands() { + c.cobraToReadline(pcItem, child) + } +} + +func (c *Threagile) usage(cmd *cobra.Command) string { + words := make([]string, 0, len(cmd.ArgAliases)+1) + words = append(words, cmd.Use) + + for _, name := range cmd.ArgAliases { + words = append(words, "["+name+"]") + } + + return strings.Join(words, " ") +} + +func (what *Threagile) readCommands() *report.GenerateCommands { + commands := new(report.GenerateCommands).Defaults() + commands.DataFlowDiagram = what.flags.generateDataFlowDiagramFlag + commands.DataAssetDiagram = what.flags.generateDataAssetDiagramFlag + commands.RisksJSON = what.flags.generateRisksJSONFlag + commands.StatsJSON = what.flags.generateStatsJSONFlag + commands.TechnicalAssetsJSON = what.flags.generateTechnicalAssetsJSONFlag + commands.RisksExcel = what.flags.generateRisksExcelFlag + commands.TagsExcel = 
what.flags.generateTagsExcelFlag + commands.ReportPDF = what.flags.generateReportPDFFlag + return commands +} + +func (what *Threagile) readConfig(cmd *cobra.Command, buildTimestamp string) *common.Config { + cfg := new(common.Config).Defaults(buildTimestamp) + configError := cfg.Load(what.flags.configFlag) + if configError != nil { + fmt.Printf("WARNING: failed to load config file %q: %v\n", what.flags.configFlag, configError) + } + + flags := cmd.Flags() + if isFlagOverridden(flags, serverPortFlagName) { + cfg.ServerPort = what.flags.serverPortFlag + } + if isFlagOverridden(flags, serverDirFlagName) { + cfg.ServerFolder = cfg.CleanPath(what.flags.serverDirFlag) + } + + if isFlagOverridden(flags, appDirFlagName) { + cfg.AppFolder = cfg.CleanPath(what.flags.appDirFlag) + } + if isFlagOverridden(flags, binDirFlagName) { + cfg.BinFolder = cfg.CleanPath(what.flags.binDirFlag) + } + if isFlagOverridden(flags, outputFlagName) { + cfg.OutputFolder = cfg.CleanPath(what.flags.outputDirFlag) + } + if isFlagOverridden(flags, tempDirFlagName) { + cfg.TempFolder = cfg.CleanPath(what.flags.tempDirFlag) + } + + if isFlagOverridden(flags, verboseFlagName) { + cfg.Verbose = what.flags.verboseFlag + } + + if isFlagOverridden(flags, inputFileFlagName) { + cfg.InputFile = cfg.CleanPath(what.flags.inputFileFlag) + } + if isFlagOverridden(flags, raaPluginFlagName) { + cfg.RAAPlugin = what.flags.raaPluginFlag + } + + if isFlagOverridden(flags, customRiskRulesPluginFlagName) { + cfg.RiskRulesPlugins = strings.Split(what.flags.customRiskRulesPluginFlag, ",") + } + if isFlagOverridden(flags, skipRiskRulesFlagName) { + cfg.SkipRiskRules = what.flags.skipRiskRulesFlag + } + if isFlagOverridden(flags, ignoreOrphanedRiskTrackingFlagName) { + cfg.IgnoreOrphanedRiskTracking = what.flags.ignoreOrphanedRiskTrackingFlag + } + if isFlagOverridden(flags, diagramDpiFlagName) { + cfg.DiagramDPI = what.flags.diagramDpiFlag + } + if isFlagOverridden(flags, templateFileNameFlagName) { + 
cfg.TemplateFilename = what.flags.templateFileNameFlag + } + return cfg +} + +func isFlagOverridden(flags *pflag.FlagSet, flagName string) bool { + flag := flags.Lookup(flagName) + if flag == nil { + return false + } + return flag.Changed +} diff --git a/internal/threagile/rules.go b/internal/threagile/rules.go new file mode 100644 index 00000000..b2bbdb3e --- /dev/null +++ b/internal/threagile/rules.go @@ -0,0 +1,77 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "fmt" + "strings" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/model" + "github.com/threagile/threagile/pkg/security/risks" + + "github.com/spf13/cobra" + + "github.com/threagile/threagile/pkg/docs" +) + +func (what *Threagile) initRules() *Threagile { + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.ListRiskRulesCommand, + Short: "Print available risk rules", + RunE: func(cmd *cobra.Command, args []string) error { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("The following risk rules are available (can be extended via custom risk rules):") + cmd.Println() + cmd.Println("----------------------") + cmd.Println("Custom risk rules:") + cmd.Println("----------------------") + customRiskRules := model.LoadCustomRiskRules(strings.Split(what.flags.customRiskRulesPluginFlag, ","), common.DefaultProgressReporter{Verbose: what.flags.verboseFlag}) + for id, customRule := range customRiskRules { + cmd.Println(id, "-->", customRule.Category.Title, "--> with tags:", customRule.Tags) + } + cmd.Println() + cmd.Println("--------------------") + cmd.Println("Built-in risk rules:") + cmd.Println("--------------------") + cmd.Println() + for _, rule := range risks.GetBuiltInRiskRules() { + cmd.Println(rule.Category().Id, "-->", rule.Category().Title, "--> with tags:", rule.SupportedTags()) + } + + return nil + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ + Use: 
common.ExplainRiskRulesCommand, + Short: "Detailed explanation of all the risk rules", + RunE: func(cmd *cobra.Command, args []string) error { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println("Explanation for risk rules:") + cmd.Println() + cmd.Println("----------------------") + cmd.Println("Custom risk rules:") + cmd.Println("----------------------") + customRiskRules := model.LoadCustomRiskRules(strings.Split(what.flags.customRiskRulesPluginFlag, ","), common.DefaultProgressReporter{Verbose: what.flags.verboseFlag}) + for _, customRule := range customRiskRules { + cmd.Printf("%v: %v\n", customRule.Category.Id, customRule.Category.Description) + } + cmd.Println() + cmd.Println("--------------------") + cmd.Println("Built-in risk rules:") + cmd.Println("--------------------") + cmd.Println() + for _, rule := range risks.GetBuiltInRiskRules() { + cmd.Printf("%v: %v\n", rule.Category().Id, rule.Category().Description) + } + cmd.Println() + + return nil + }, + }) + + return what +} diff --git a/internal/threagile/server.go b/internal/threagile/server.go new file mode 100644 index 00000000..aba7f6f8 --- /dev/null +++ b/internal/threagile/server.go @@ -0,0 +1,33 @@ +package threagile + +import ( + "github.com/spf13/cobra" + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/server" +) + +func (what *Threagile) initServer() *Threagile { + defaultConfig := new(common.Config).Defaults(what.buildTimestamp) + + serverCmd := &cobra.Command{ + Use: "server", + Short: "Run server", + RunE: func(cmd *cobra.Command, args []string) error { + cfg := what.readConfig(cmd, what.buildTimestamp) + cfg.ServerMode = true + serverError := cfg.CheckServerFolder() + if serverError != nil { + return serverError + } + server.RunServer(cfg) + return nil + }, + } + + serverCmd.PersistentFlags().IntVar(&what.flags.serverPortFlag, serverPortFlagName, defaultConfig.ServerPort, "server port") + 
serverCmd.PersistentFlags().StringVar(&what.flags.serverDirFlag, serverDirFlagName, defaultConfig.DataFolder, "base folder for server mode (default: "+common.DataDir+")") + + what.rootCmd.AddCommand(serverCmd) + + return what +} diff --git a/internal/threagile/threagile.go b/internal/threagile/threagile.go new file mode 100644 index 00000000..a3a5939a --- /dev/null +++ b/internal/threagile/threagile.go @@ -0,0 +1,24 @@ +package threagile + +import ( + "github.com/spf13/cobra" + "os" +) + +type Threagile struct { + flags Flags + rootCmd *cobra.Command + buildTimestamp string +} + +func (what *Threagile) Execute() { + err := what.rootCmd.Execute() + if err != nil { + os.Exit(1) + } +} + +func (what *Threagile) Init(buildTimestamp string) *Threagile { + what.buildTimestamp = buildTimestamp + return what.initRoot().initAbout().initRules().initExamples().initMacros().initTypes().initAnalyze().initServer().initQuit() +} diff --git a/internal/threagile/types.go b/internal/threagile/types.go new file mode 100644 index 00000000..f6f50ffd --- /dev/null +++ b/internal/threagile/types.go @@ -0,0 +1,52 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package threagile + +import ( + "fmt" + "github.com/threagile/threagile/pkg/common" + + "github.com/spf13/cobra" + + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/security/types" +) + +func (what *Threagile) initTypes() *Threagile { + what.rootCmd.AddCommand(&cobra.Command{ + Use: common.ListTypesCommand, + Short: "Print type information (enum values to be used in models)", + Run: func(cmd *cobra.Command, args []string) { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + cmd.Println() + cmd.Println() + cmd.Println("The following types are available (can be extended for custom rules):") + cmd.Println() + for name, values := range types.GetBuiltinTypeValues() { + cmd.Println(fmt.Sprintf(" %v: %v", name, values)) + } + }, + }) + + what.rootCmd.AddCommand(&cobra.Command{ 
+ Use: common.ExplainTypesCommand, + Short: "Print type information (enum values to be used in models)", + Run: func(cmd *cobra.Command, args []string) { + cmd.Println(docs.Logo + "\n\n" + fmt.Sprintf(docs.VersionText, what.buildTimestamp)) + fmt.Println("Explanation for the types:") + cmd.Println() + cmd.Println("The following types are available (can be extended for custom rules):") + cmd.Println() + for name, values := range types.GetBuiltinTypeValues() { + cmd.Println(name) + for _, candidate := range values { + cmd.Printf("\t %v: %v\n", candidate, candidate.Explain()) + } + } + }, + }) + + return what +} diff --git a/macros/built-in/add-build-pipeline/add-build-pipeline-macro.go b/macros/built-in/add-build-pipeline/add-build-pipeline-macro.go deleted file mode 100644 index dc68cea1..00000000 --- a/macros/built-in/add-build-pipeline/add-build-pipeline-macro.go +++ /dev/null @@ -1,1012 +0,0 @@ -package add_build_pipeline - -import ( - "fmt" - "github.com/threagile/threagile/model" - "sort" - "strings" -) - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "add-build-pipeline", - Title: "Add Build Pipeline", - Description: "This model macro adds a build pipeline (development client, build pipeline, artifact registry, container image registry, " + - "source code repository, etc.) to the model.", - } -} - -var macroState = make(map[string][]string) -var questionsAnswered = make([]string, 0) -var codeInspectionUsed, containerTechUsed, withinTrustBoundary, createNewTrustBoundary bool - -const createNewTrustBoundaryLabel = "CREATE NEW TRUST BOUNDARY" - -var pushOrPull = []string{ - "Push-based Deployment (build pipeline deploys towards target asset)", - "Pull-based Deployment (deployment target asset fetches deployment from registry)", -} - -// TODO add question for type of machine (either physical, virtual, container, etc.) 
- -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - counter := len(questionsAnswered) - if counter > 3 && !codeInspectionUsed { - counter++ - } - if counter > 5 && !containerTechUsed { - counter += 2 - } - if counter > 12 && !withinTrustBoundary { - counter++ - } - if counter > 13 && !createNewTrustBoundary { - counter++ - } - switch counter { - case 0: - return model.MacroQuestion{ - ID: "source-repository", - Title: "What product is used as the sourcecode repository?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "Git", - }, nil - case 1: - return model.MacroQuestion{ - ID: "build-pipeline", - Title: "What product is used as the build pipeline?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "Jenkins", - }, nil - case 2: - return model.MacroQuestion{ - ID: "artifact-registry", - Title: "What product is used as the artifact registry?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "Nexus", - }, nil - case 3: - return model.MacroQuestion{ - ID: "code-inspection-used", - Title: "Are code inspection platforms (like SonarQube) used?", - Description: "This affects whether code inspection platform are added.", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "Yes", - }, nil - case 4: - return model.MacroQuestion{ - ID: "code-inspection-platform", - Title: "What product is used as the code inspection platform?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "SonarQube", - }, nil - case 5: - return model.MacroQuestion{ - ID: "container-technology-used", - Title: "Is 
container technology (like Docker) used?", - Description: "This affects whether container registries are added.", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "Yes", - }, nil - case 6: - return model.MacroQuestion{ - ID: "container-registry", - Title: "What product is used as the container registry?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "Docker", - }, nil - case 7: - return model.MacroQuestion{ - ID: "container-platform", - Title: "What product is used as the container platform (for orchestration and runtime)?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "Kubernetes", - }, nil - case 8: - return model.MacroQuestion{ - ID: "internet", - Title: "Are build pipeline components exposed on the internet?", - Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "No", - }, nil - case 9: - return model.MacroQuestion{ - ID: "multi-tenant", - Title: "Are build pipeline components used by multiple tenants?", - Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "No", - }, nil - case 10: - return model.MacroQuestion{ - ID: "encryption", - Title: "Are build pipeline components encrypted?", - Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "No", - }, nil - case 11: - possibleAnswers := make([]string, 0) - for id, _ := range model.ParsedModelRoot.TechnicalAssets { - possibleAnswers = append(possibleAnswers, id) - } - sort.Strings(possibleAnswers) - if len(possibleAnswers) > 0 { - return model.MacroQuestion{ - ID: "deploy-targets", - Title: "Select all technical assets where the build pipeline deploys to:", - Description: "This affects the communication links being 
generated.", - PossibleAnswers: possibleAnswers, - MultiSelect: true, - DefaultAnswer: "", - }, nil - } - case 12: - return model.MacroQuestion{ - ID: "within-trust-boundary", - Title: "Are the server-side components of the build pipeline components within a network trust boundary?", - Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "Yes", - }, nil - case 13: - possibleAnswers := []string{createNewTrustBoundaryLabel} - for id, trustBoundary := range model.ParsedModelRoot.TrustBoundaries { - if trustBoundary.Type.IsNetworkBoundary() { - possibleAnswers = append(possibleAnswers, id) - } - } - sort.Strings(possibleAnswers) - return model.MacroQuestion{ - ID: "selected-trust-boundary", - Title: "Choose from the list of existing network trust boundaries or create a new one?", - Description: "", - PossibleAnswers: possibleAnswers, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 14: - return model.MacroQuestion{ - ID: "new-trust-boundary-type", - Title: "Of which type shall the new trust boundary be?", - Description: "", - PossibleAnswers: []string{model.NetworkOnPrem.String(), - model.NetworkDedicatedHoster.String(), - model.NetworkVirtualLAN.String(), - model.NetworkCloudProvider.String(), - model.NetworkCloudSecurityGroup.String(), - model.NetworkPolicyNamespaceIsolation.String()}, - MultiSelect: false, - DefaultAnswer: model.NetworkOnPrem.String(), - }, nil - case 15: - return model.MacroQuestion{ - ID: "push-or-pull", - Title: "What type of deployment strategy is used?", - Description: "Push-based deployments are more classic ones and pull-based are more GitOps-like ones.", - PossibleAnswers: pushOrPull, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 16: - return model.MacroQuestion{ - ID: "owner", - Title: "Who is the owner of the build pipeline and runtime assets?", - Description: "This name affects the technical asset's and data asset's owner.", - PossibleAnswers: nil, - MultiSelect: false, - 
DefaultAnswer: "", - }, nil - } - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { - macroState[questionID] = answer - questionsAnswered = append(questionsAnswered, questionID) - if questionID == "code-inspection-used" { - codeInspectionUsed = strings.ToLower(macroState["code-inspection-used"][0]) == "yes" - } else if questionID == "container-technology-used" { - containerTechUsed = strings.ToLower(macroState["container-technology-used"][0]) == "yes" - } else if questionID == "within-trust-boundary" { - withinTrustBoundary = strings.ToLower(macroState["within-trust-boundary"][0]) == "yes" - } else if questionID == "selected-trust-boundary" { - createNewTrustBoundary = strings.ToLower(macroState["selected-trust-boundary"][0]) == strings.ToLower(createNewTrustBoundaryLabel) - } - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - if len(questionsAnswered) == 0 { - return "Cannot go back further", false, nil - } - lastQuestionID := questionsAnswered[len(questionsAnswered)-1] - questionsAnswered = questionsAnswered[:len(questionsAnswered)-1] - delete(macroState, lastQuestionID) - return "Undo successful", true, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - changeLogCollector := make([]string, 0) - message, validResult, err = applyChange(modelInput, &changeLogCollector, true) - return changeLogCollector, message, validResult, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - changeLogCollector := make([]string, 0) - message, validResult, err = applyChange(modelInput, &changeLogCollector, false) - return message, validResult, err -} - -func applyChange(modelInput *model.ModelInput, changeLogCollector *[]string, dryRun bool) (message string, validResult bool, err error) { - var 
serverSideTechAssets = make([]string, 0) - // ################################################ - model.AddTagToModelInput(modelInput, macroState["source-repository"][0], dryRun, changeLogCollector) - model.AddTagToModelInput(modelInput, macroState["build-pipeline"][0], dryRun, changeLogCollector) - model.AddTagToModelInput(modelInput, macroState["artifact-registry"][0], dryRun, changeLogCollector) - if containerTechUsed { - model.AddTagToModelInput(modelInput, macroState["container-registry"][0], dryRun, changeLogCollector) - model.AddTagToModelInput(modelInput, macroState["container-platform"][0], dryRun, changeLogCollector) - } - if codeInspectionUsed { - model.AddTagToModelInput(modelInput, macroState["code-inspection-platform"][0], dryRun, changeLogCollector) - } - - sourceRepoID := model.MakeID(macroState["source-repository"][0]) + "-sourcecode-repository" - buildPipelineID := model.MakeID(macroState["build-pipeline"][0]) + "-build-pipeline" - artifactRegistryID := model.MakeID(macroState["artifact-registry"][0]) + "-artifact-registry" - containerRepoID, containerPlatformID, containerSharedRuntimeID := "", "", "" - if containerTechUsed { - containerRepoID = model.MakeID(macroState["container-registry"][0]) + "-container-registry" - containerPlatformID = model.MakeID(macroState["container-platform"][0]) + "-container-platform" - containerSharedRuntimeID = model.MakeID(macroState["container-platform"][0]) + "-container-runtime" - } - codeInspectionPlatformID := "" - if codeInspectionUsed { - codeInspectionPlatformID = model.MakeID(macroState["code-inspection-platform"][0]) + "-code-inspection-platform" - } - owner := macroState["owner"][0] - - if _, exists := model.ParsedModelRoot.DataAssets["Sourcecode"]; !exists { - //fmt.Println("Adding data asset:", "sourcecode") // ################################################ - dataAsset := model.InputDataAsset{ - ID: "sourcecode", - Description: "Sourcecode to build the application components from", - Usage: 
model.DevOps.String(), - Tags: []string{}, - Origin: "", - Owner: owner, - Quantity: model.Few.String(), - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Sourcecode is at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - } - *changeLogCollector = append(*changeLogCollector, "adding data asset: sourcecode") - if !dryRun { - modelInput.Data_assets["Sourcecode"] = dataAsset - } - } - - if _, exists := model.ParsedModelRoot.DataAssets["Deployment"]; !exists { - //fmt.Println("Adding data asset:", "deployment") // ################################################ - dataAsset := model.InputDataAsset{ - ID: "deployment", - Description: "Deployment unit being installed/shipped", - Usage: model.DevOps.String(), - Tags: []string{}, - Origin: "", - Owner: owner, - Quantity: model.VeryFew.String(), - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Deployment units are at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - } - *changeLogCollector = append(*changeLogCollector, "adding data asset: deployment") - if !dryRun { - modelInput.Data_assets["Deployment"] = dataAsset - } - } - - id := "development-client" - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - - commLinks := make(map[string]model.InputCommunicationLink) - commLinks["Sourcecode Repository Traffic"] = model.InputCommunicationLink{ - Target: 
sourceRepoID, - Description: "Sourcecode Repository Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"sourcecode"}, - Data_assets_received: []string{"sourcecode"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - commLinks["Build Pipeline Traffic"] = model.InputCommunicationLink{ - Target: buildPipelineID, - Description: "Build Pipeline Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - commLinks["Artifact Registry Traffic"] = model.InputCommunicationLink{ - Target: artifactRegistryID, - Description: "Artifact Registry Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if containerTechUsed { - commLinks["Container Registry Traffic"] = model.InputCommunicationLink{ - Target: containerRepoID, - Description: "Container Registry Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - 
Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - commLinks["Container Platform Traffic"] = model.InputCommunicationLink{ - Target: containerPlatformID, - Description: "Container Platform Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - } - if codeInspectionUsed { - commLinks["Code Inspection Platform Traffic"] = model.InputCommunicationLink{ - Target: codeInspectionPlatformID, - Description: "Code Inspection Platform Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.EnduserIdentityPropagation.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"sourcecode"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - } - - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: "Development Client", - Type: model.ExternalEntity.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: true, - Out_of_scope: true, - Justification_out_of_scope: "Development client is not directly in-scope of the application.", - Size: model.System.String(), - Technology: model.DevOpsClient.String(), - Tags: []string{}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Physical.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Sourcecode processing components are 
at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: false, - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"sourcecode", "deployment"}, - Data_assets_stored: []string{"sourcecode", "deployment"}, - Data_formats_accepted: []string{"file"}, - Communication_links: commLinks, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets["Development Client"] = techAsset - } - } - - id = sourceRepoID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - serverSideTechAssets = append(serverSideTechAssets, id) - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["source-repository"][0] + " Sourcecode Repository", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.SourcecodeRepository.String(), - Tags: []string{model.NormalizeTag(macroState["source-repository"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Sourcecode processing components are at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: 
strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"sourcecode"}, - Data_assets_stored: []string{"sourcecode"}, - Data_formats_accepted: []string{"file"}, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["source-repository"][0]+" Sourcecode Repository"] = techAsset - } - } - - if containerTechUsed { - id = containerRepoID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - serverSideTechAssets = append(serverSideTechAssets, id) - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["container-registry"][0] + " Container Registry", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.ArtifactRegistry.String(), - Tags: []string{model.NormalizeTag(macroState["container-registry"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Container registry components are at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - 
Data_assets_processed: []string{"deployment"}, - Data_assets_stored: []string{"deployment"}, - Data_formats_accepted: []string{"file"}, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["container-registry"][0]+" Container Registry"] = techAsset - } - } - - id = containerPlatformID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - serverSideTechAssets = append(serverSideTechAssets, id) - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["container-platform"][0] + " Container Platform", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.System.String(), - Technology: model.ContainerPlatform.String(), - Tags: []string{model.NormalizeTag(macroState["container-platform"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.MissionCritical.String(), - Availability: model.MissionCritical.String(), - Justification_cia_rating: "Container platform components are rated as 'mission-critical' in terms of integrity and availability, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"deployment"}, - Data_assets_stored: []string{"deployment"}, - 
Data_formats_accepted: []string{"file"}, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["container-platform"][0]+" Container Platform"] = techAsset - } - } - } - - id = buildPipelineID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - serverSideTechAssets = append(serverSideTechAssets, id) - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - - commLinks := make(map[string]model.InputCommunicationLink) - commLinks["Sourcecode Repository Traffic"] = model.InputCommunicationLink{ - Target: sourceRepoID, - Description: "Sourcecode Repository Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"sourcecode"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - commLinks["Artifact Registry Traffic"] = model.InputCommunicationLink{ - Target: artifactRegistryID, - Description: "Artifact Registry Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if containerTechUsed { - commLinks["Container Registry Traffic"] = model.InputCommunicationLink{ - Target: containerRepoID, - Description: "Container 
Registry Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if macroState["push-or-pull"][0] == pushOrPull[0] { // Push - commLinks["Container Platform Push"] = model.InputCommunicationLink{ - Target: containerPlatformID, - Description: "Container Platform Push", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - } else { // Pull - commLinkPull := model.InputCommunicationLink{ - Target: containerRepoID, - Description: "Container Platform Pull", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if !dryRun { - titleOfTargetAsset := macroState["container-platform"][0] + " Container Platform" - containerPlatform := modelInput.Technical_assets[titleOfTargetAsset] - if containerPlatform.Communication_links == nil { - containerPlatform.Communication_links = make(map[string]model.InputCommunicationLink, 0) - } - containerPlatform.Communication_links["Container Platform Pull"] = commLinkPull - modelInput.Technical_assets[titleOfTargetAsset] = containerPlatform - } - } - } - 
if codeInspectionUsed { - commLinks["Code Inspection Platform Traffic"] = model.InputCommunicationLink{ - Target: codeInspectionPlatformID, - Description: "Code Inspection Platform Traffic", - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"sourcecode"}, - Data_assets_received: []string{}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - } - // The individual deployments - for _, deployTargetID := range macroState["deploy-targets"] { // add a connection to each deployment target - //fmt.Println("Adding deployment flow to:", deployTargetID) - if containerTechUsed { - if !dryRun { - containerPlatform := modelInput.Technical_assets[macroState["container-platform"][0]+" Container Platform"] - if containerPlatform.Communication_links == nil { - containerPlatform.Communication_links = make(map[string]model.InputCommunicationLink, 0) - } - containerPlatform.Communication_links["Container Spawning ("+deployTargetID+")"] = model.InputCommunicationLink{ - Target: deployTargetID, - Description: "Container Spawning " + deployTargetID, - Protocol: model.ContainerSpawning.String(), - Authentication: model.NoneAuthentication.String(), - Authorization: model.NoneAuthorization.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: nil, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - modelInput.Technical_assets[macroState["container-platform"][0]+" Container Platform"] = containerPlatform - } - } else { // No Containers used - if macroState["push-or-pull"][0] == pushOrPull[0] { // Push - commLinks["Deployment Push ("+deployTargetID+")"] = model.InputCommunicationLink{ - Target: deployTargetID, - 
Description: "Deployment Push to " + deployTargetID, - Protocol: model.SSH.String(), - Authentication: model.ClientCertificate.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"deployment"}, - Data_assets_received: nil, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - } else { // Pull - pullFromWhere := artifactRegistryID - commLinkPull := model.InputCommunicationLink{ - Target: pullFromWhere, - Description: "Deployment Pull from " + deployTargetID, - Protocol: model.HTTPS.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"deployment"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if !dryRun { - // take care to lookup by title (as keyed in input YAML by title and only in parsed model representation by ID) - titleOfTargetAsset := model.ParsedModelRoot.TechnicalAssets[deployTargetID].Title - x := modelInput.Technical_assets[titleOfTargetAsset] - if x.Communication_links == nil { - x.Communication_links = make(map[string]model.InputCommunicationLink, 0) - } - x.Communication_links["Deployment Pull ("+deployTargetID+")"] = commLinkPull - modelInput.Technical_assets[titleOfTargetAsset] = x - } - - } - } - - // don't forget to also add the "deployment" data asset as stored on the target - targetAssetTitle := model.ParsedModelRoot.TechnicalAssets[deployTargetID].Title - assetsStored := make([]string, 0) - if modelInput.Technical_assets[targetAssetTitle].Data_assets_stored != nil { - for _, val := range modelInput.Technical_assets[targetAssetTitle].Data_assets_stored { - assetsStored = append(assetsStored, fmt.Sprintf("%v", val)) - } - } - mergedArrays := make([]string, 0) - 
for _, val := range assetsStored { - mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) - } - mergedArrays = append(mergedArrays, "deployment") - if !dryRun { - x := modelInput.Technical_assets[targetAssetTitle] - x.Data_assets_stored = mergedArrays - modelInput.Technical_assets[targetAssetTitle] = x - } - } - - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["build-pipeline"][0] + " Build Pipeline", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.BuildPipeline.String(), - Tags: []string{model.NormalizeTag(macroState["build-pipeline"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Build pipeline components are at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"sourcecode", "deployment"}, - Data_assets_stored: []string{"sourcecode", "deployment"}, - Data_formats_accepted: []string{"file"}, - Communication_links: commLinks, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["build-pipeline"][0]+" Build Pipeline"] = techAsset - } - } - - id = artifactRegistryID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - 
serverSideTechAssets = append(serverSideTechAssets, id) - encryption := model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["artifact-registry"][0] + " Artifact Registry", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.ArtifactRegistry.String(), - Tags: []string{model.NormalizeTag(macroState["artifact-registry"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Important.String(), - Justification_cia_rating: "Artifact registry components are at least rated as 'critical' in terms of integrity, because any " + - "malicious modification of it might lead to a backdoored production system.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"sourcecode", "deployment"}, - Data_assets_stored: []string{"sourcecode", "deployment"}, - Data_formats_accepted: []string{"file"}, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["artifact-registry"][0]+" Artifact Registry"] = techAsset - } - } - - if codeInspectionUsed { - id = codeInspectionPlatformID - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; !exists { - //fmt.Println("Adding technical asset:", id) // ################################################ - serverSideTechAssets = append(serverSideTechAssets, id) - encryption := 
model.NoneEncryption.String() - if strings.ToLower(macroState["encryption"][0]) == "yes" { - encryption = model.Transparent.String() - } - techAsset := model.InputTechnicalAsset{ - ID: id, - Description: macroState["code-inspection-platform"][0] + " Code Inspection Platform", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.CodeInspectionPlatform.String(), - Tags: []string{model.NormalizeTag(macroState["code-inspection-platform"][0])}, - Internet: strings.ToLower(macroState["internet"][0]) == "yes", - Machine: model.Virtual.String(), - Encryption: encryption, - Owner: owner, - Confidentiality: model.Confidential.String(), - Integrity: model.Important.String(), - Availability: model.Operational.String(), - Justification_cia_rating: "Sourcecode inspection platforms are rated at least 'important' in terms of integrity, because any " + - "malicious modification of it might lead to vulnerabilities found by the scanner engine not being shown.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"sourcecode"}, - Data_assets_stored: []string{"sourcecode"}, - Data_formats_accepted: []string{"file"}, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) - if !dryRun { - modelInput.Technical_assets[macroState["code-inspection-platform"][0]+" Code Inspection Platform"] = techAsset - } - } - } - - if withinTrustBoundary { - if createNewTrustBoundary { - trustBoundaryType := macroState["new-trust-boundary-type"][0] - //fmt.Println("Adding new trust boundary of type:", trustBoundaryType) - title := "DevOps Network" - trustBoundary := model.InputTrustBoundary{ - ID: "devops-network", - Description: "DevOps Network", - Type: 
trustBoundaryType, - Tags: []string{}, - Technical_assets_inside: serverSideTechAssets, - Trust_boundaries_nested: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding trust boundary: devops-network") - if !dryRun { - modelInput.Trust_boundaries[title] = trustBoundary - } - } else { - existingTrustBoundaryToAddTo := macroState["selected-trust-boundary"][0] - //fmt.Println("Adding to existing trust boundary:", existingTrustBoundaryToAddTo) - title := model.ParsedModelRoot.TrustBoundaries[existingTrustBoundaryToAddTo].Title - assetsInside := make([]string, 0) - if modelInput.Trust_boundaries[title].Technical_assets_inside != nil { - vals := modelInput.Trust_boundaries[title].Technical_assets_inside - for _, val := range vals { - assetsInside = append(assetsInside, fmt.Sprintf("%v", val)) - } - } - mergedArrays := make([]string, 0) - for _, val := range assetsInside { - mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) - } - mergedArrays = append(mergedArrays, serverSideTechAssets...) 
- *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) - if !dryRun { - if modelInput.Trust_boundaries == nil { - modelInput.Trust_boundaries = make(map[string]model.InputTrustBoundary, 0) - } - tb := modelInput.Trust_boundaries[title] - tb.Technical_assets_inside = mergedArrays - modelInput.Trust_boundaries[title] = tb - } - } - } - - if containerTechUsed { - // create shared runtime - assetsRunning := make([]string, 0) - for _, deployTargetID := range macroState["deploy-targets"] { - assetsRunning = append(assetsRunning, deployTargetID) - } - title := macroState["container-platform"][0] + " Runtime" - sharedRuntime := model.InputSharedRuntime{ - ID: containerSharedRuntimeID, - Description: title, - Tags: []string{model.NormalizeTag(macroState["container-platform"][0])}, - Technical_assets_running: assetsRunning, - } - *changeLogCollector = append(*changeLogCollector, "adding shared runtime: "+containerSharedRuntimeID) - if !dryRun { - if modelInput.Shared_runtimes == nil { - modelInput.Shared_runtimes = make(map[string]model.InputSharedRuntime, 0) - } - modelInput.Shared_runtimes[title] = sharedRuntime - } - } - - return "Changeset valid", true, nil -} diff --git a/macros/built-in/add-vault/add-vault-macro.go b/macros/built-in/add-vault/add-vault-macro.go deleted file mode 100644 index 03ec5f57..00000000 --- a/macros/built-in/add-vault/add-vault-macro.go +++ /dev/null @@ -1,449 +0,0 @@ -package add_vault - -import ( - "fmt" - "github.com/threagile/threagile/model" - "sort" - "strings" -) - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "add-vault", - Title: "Add Vault", - Description: "This model macro adds a vault (secret storage) to the model.", - } -} - -var macroState = make(map[string][]string) -var questionsAnswered = make([]string, 0) -var withinTrustBoundary, createNewTrustBoundary bool - -const createNewTrustBoundaryLabel = "CREATE NEW TRUST BOUNDARY" - -var 
storageTypes = []string{ - "Cloud Provider (storage buckets or similar)", - "Container Platform (orchestration platform managed storage)", - "Database (SQL-DB, NoSQL-DB, object store or similar)", // TODO let user choose to reuse existing technical asset when shared storage (which would be bad) - "Filesystem (local or remote)", - "In-Memory (no persistent storage of secrets)", - "Service Registry", // TODO let user choose which technical asset the registry is (for comm link) -} - -var authenticationTypes = []string{ - "Certificate", - "Cloud Provider (relying on cloud provider instance authentication)", - "Container Platform (orchestration platform managed authentication)", - "Credentials (username/password, API-key, secret token, etc.)", -} - -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - counter := len(questionsAnswered) - if counter > 5 && !withinTrustBoundary { - counter++ - } - if counter > 6 && !createNewTrustBoundary { - counter++ - } - switch counter { - case 0: - return model.MacroQuestion{ - ID: "vault-name", - Title: "What product is used as the vault?", - Description: "This name affects the technical asset's title and ID plus also the tags used.", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 1: - return model.MacroQuestion{ - ID: "storage-type", - Title: "What type of storage is used for the vault?", - Description: "This selection affects the type of technical asset for the persistence.", - PossibleAnswers: storageTypes, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 2: - return model.MacroQuestion{ - ID: "authentication-type", - Title: "What type of authentication is used for accessing the vault?", - Description: "This selection affects the type of communication links.", - PossibleAnswers: authenticationTypes, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 3: - return model.MacroQuestion{ - ID: "multi-tenant", - Title: "Is the vault used by multiple tenants?", - 
Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "No", - }, nil - case 4: - possibleAnswers := make([]string, 0) - for id, _ := range model.ParsedModelRoot.TechnicalAssets { - possibleAnswers = append(possibleAnswers, id) - } - sort.Strings(possibleAnswers) - if len(possibleAnswers) > 0 { - return model.MacroQuestion{ - ID: "clients", - Title: "Select all technical assets that make use of the vault and access it:", - Description: "This affects the communication links being generated.", - PossibleAnswers: possibleAnswers, - MultiSelect: true, - DefaultAnswer: "", - }, nil - } - case 5: - return model.MacroQuestion{ - ID: "within-trust-boundary", - Title: "Is the vault placed within a network trust boundary?", - Description: "", - PossibleAnswers: []string{"Yes", "No"}, - MultiSelect: false, - DefaultAnswer: "Yes", - }, nil - case 6: - possibleAnswers := []string{createNewTrustBoundaryLabel} - for id, trustBoundary := range model.ParsedModelRoot.TrustBoundaries { - if trustBoundary.Type.IsNetworkBoundary() { - possibleAnswers = append(possibleAnswers, id) - } - } - sort.Strings(possibleAnswers) - return model.MacroQuestion{ - ID: "selected-trust-boundary", - Title: "Choose from the list of existing network trust boundaries or create a new one?", - Description: "", - PossibleAnswers: possibleAnswers, - MultiSelect: false, - DefaultAnswer: "", - }, nil - case 7: - return model.MacroQuestion{ - ID: "new-trust-boundary-type", - Title: "Of which type shall the new trust boundary be?", - Description: "", - PossibleAnswers: []string{model.NetworkOnPrem.String(), - model.NetworkDedicatedHoster.String(), - model.NetworkVirtualLAN.String(), - model.NetworkCloudProvider.String(), - model.NetworkCloudSecurityGroup.String(), - model.NetworkPolicyNamespaceIsolation.String()}, - MultiSelect: false, - DefaultAnswer: model.NetworkOnPrem.String(), - }, nil - } - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID 
string, answer ...string) (message string, validResult bool, err error) { - macroState[questionID] = answer - questionsAnswered = append(questionsAnswered, questionID) - if questionID == "within-trust-boundary" { - withinTrustBoundary = strings.ToLower(macroState["within-trust-boundary"][0]) == "yes" - } else if questionID == "selected-trust-boundary" { - createNewTrustBoundary = strings.ToLower(macroState["selected-trust-boundary"][0]) == strings.ToLower(createNewTrustBoundaryLabel) - } - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - if len(questionsAnswered) == 0 { - return "Cannot go back further", false, nil - } - lastQuestionID := questionsAnswered[len(questionsAnswered)-1] - questionsAnswered = questionsAnswered[:len(questionsAnswered)-1] - delete(macroState, lastQuestionID) - return "Undo successful", true, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - changeLogCollector := make([]string, 0) - message, validResult, err = applyChange(modelInput, &changeLogCollector, true) - return changeLogCollector, message, validResult, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - changeLogCollector := make([]string, 0) - message, validResult, err = applyChange(modelInput, &changeLogCollector, false) - return message, validResult, err -} - -func applyChange(modelInput *model.ModelInput, changeLogCollector *[]string, dryRun bool) (message string, validResult bool, err error) { - model.AddTagToModelInput(modelInput, macroState["vault-name"][0], dryRun, changeLogCollector) - - var serverSideTechAssets = make([]string, 0) - - if _, exists := model.ParsedModelRoot.DataAssets["Configuration Secrets"]; !exists { - dataAsset := model.InputDataAsset{ - ID: "configuration-secrets", - Description: "Configuration secrets (like credentials, keys, certificates, etc.) 
secured and managed by a vault", - Usage: model.DevOps.String(), - Tags: []string{}, - Origin: "", - Owner: "", - Quantity: model.VeryFew.String(), - Confidentiality: model.StrictlyConfidential.String(), - Integrity: model.Critical.String(), - Availability: model.Critical.String(), - Justification_cia_rating: "Configuration secrets are rated as being 'strictly-confidential'.", - } - *changeLogCollector = append(*changeLogCollector, "adding data asset: configuration-secrets") - if !dryRun { - modelInput.Data_assets["Configuration Secrets"] = dataAsset - } - } - - databaseUsed := macroState["storage-type"][0] == storageTypes[2] - filesystemUsed := macroState["storage-type"][0] == storageTypes[3] - inMemoryUsed := macroState["storage-type"][0] == storageTypes[4] - - storageID := "vault-storage" - - if databaseUsed || filesystemUsed { - tech := model.FileServer.String() // TODO ask for local or remote and only local use execution-environment (and add separate tech type LocalFilesystem?) - if databaseUsed { - tech = model.Database.String() - } - if _, exists := model.ParsedModelRoot.TechnicalAssets[storageID]; !exists { - serverSideTechAssets = append(serverSideTechAssets, storageID) - techAsset := model.InputTechnicalAsset{ - ID: storageID, - Description: "Vault Storage", - Type: model.Datastore.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Component.String(), - Technology: tech, - Tags: []string{}, // TODO: let user enter or too detailed for a wizard? - Internet: false, - Machine: model.Virtual.String(), // TODO: let user enter or too detailed for a wizard? 
- Encryption: model.DataWithSymmetricSharedKey.String(), // can be assumed for a vault product as at least having some good encryption - Owner: "", - Confidentiality: model.Confidential.String(), - Integrity: model.Critical.String(), - Availability: model.Critical.String(), - Justification_cia_rating: "Vault components are only rated as 'confidential' as vaults usually apply a trust barrier to encrypt all data-at-rest with a vault key.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: nil, - Data_assets_stored: []string{"configuration-secrets"}, - Data_formats_accepted: nil, - Communication_links: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset: "+storageID) - if !dryRun { - modelInput.Technical_assets["Vault Storage"] = techAsset - } - } - } - - vaultID := model.MakeID(macroState["vault-name"][0]) + "-vault" - - if _, exists := model.ParsedModelRoot.TechnicalAssets[vaultID]; !exists { - serverSideTechAssets = append(serverSideTechAssets, vaultID) - commLinks := make(map[string]model.InputCommunicationLink) - - if databaseUsed || filesystemUsed { - accessLink := model.InputCommunicationLink{ - Target: storageID, - Description: "Vault Storage Access", - Protocol: model.LocalFileAccess.String(), - Authentication: model.Credentials.String(), - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: false, - Usage: model.DevOps.String(), - Data_assets_sent: []string{"configuration-secrets"}, - Data_assets_received: []string{"configuration-secrets"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - if databaseUsed { - accessLink.Protocol = model.SQL_access_protocol.String() // TODO ask if encrypted and ask if NoSQL? or to detailed for a wizard? 
- } - commLinks["Vault Storage Access"] = accessLink - } - - authentication := model.NoneAuthentication.String() - if macroState["authentication-type"][0] == authenticationTypes[0] { - authentication = model.ClientCertificate.String() - } else if macroState["authentication-type"][0] == authenticationTypes[1] { - authentication = model.Externalized.String() - } else if macroState["authentication-type"][0] == authenticationTypes[2] { - authentication = model.Externalized.String() - } else if macroState["authentication-type"][0] == authenticationTypes[3] { - authentication = model.Credentials.String() - } - for _, clientID := range macroState["clients"] { // add a connection from each client - clientAccessCommLink := model.InputCommunicationLink{ - Target: vaultID, - Description: "Vault Access Traffic (by " + clientID + ")", - Protocol: model.HTTPS.String(), - Authentication: authentication, - Authorization: model.TechnicalUser.String(), - Tags: []string{}, - VPN: false, - IP_filtered: false, - Readonly: true, - Usage: model.DevOps.String(), - Data_assets_sent: nil, - Data_assets_received: []string{"configuration-secrets"}, - Diagram_tweak_weight: 0, - Diagram_tweak_constraint: false, - } - clientAssetTitle := model.ParsedModelRoot.TechnicalAssets[clientID].Title - if !dryRun { - client := modelInput.Technical_assets[clientAssetTitle] - client.Communication_links["Vault Access ("+clientID+")"] = clientAccessCommLink - modelInput.Technical_assets[clientAssetTitle] = client - } - // don't forget to also add the "configuration-secrets" data asset as processed on the client - assetsProcessed := make([]string, 0) - if modelInput.Technical_assets[clientAssetTitle].Data_assets_processed != nil { - for _, val := range modelInput.Technical_assets[clientAssetTitle].Data_assets_processed { - assetsProcessed = append(assetsProcessed, fmt.Sprintf("%v", val)) - } - } - mergedArrays := make([]string, 0) - for _, val := range assetsProcessed { - mergedArrays = append(mergedArrays, 
fmt.Sprintf("%v", val)) - } - mergedArrays = append(mergedArrays, "configuration-secrets") - if !dryRun { - x := modelInput.Technical_assets[clientAssetTitle] - x.Data_assets_processed = mergedArrays - modelInput.Technical_assets[clientAssetTitle] = x - } - } - - techAsset := model.InputTechnicalAsset{ - ID: vaultID, - Description: macroState["vault-name"][0] + " Vault", - Type: model.Process.String(), - Usage: model.DevOps.String(), - Used_as_client_by_human: false, - Out_of_scope: false, - Justification_out_of_scope: "", - Size: model.Service.String(), - Technology: model.Vault.String(), - Tags: []string{model.NormalizeTag(macroState["vault-name"][0])}, - Internet: false, - Machine: model.Virtual.String(), - Encryption: model.Transparent.String(), - Owner: "", - Confidentiality: model.StrictlyConfidential.String(), - Integrity: model.Critical.String(), - Availability: model.Critical.String(), - Justification_cia_rating: "Vault components are rated as 'strictly-confidential'.", - Multi_tenant: strings.ToLower(macroState["multi-tenant"][0]) == "yes", - Redundant: false, - Custom_developed_parts: false, - Data_assets_processed: []string{"configuration-secrets"}, - Data_assets_stored: nil, - Data_formats_accepted: nil, - Communication_links: commLinks, - } - if inMemoryUsed { - techAsset.Data_assets_stored = []string{"configuration-secrets"} - } - *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+vaultID) - if !dryRun { - modelInput.Technical_assets[macroState["vault-name"][0]+" Vault"] = techAsset - } - } - - vaultEnvID := "vault-environment" - if filesystemUsed { - title := "Vault Environment" - trustBoundary := model.InputTrustBoundary{ - ID: vaultEnvID, - Description: "Vault Environment", - Type: model.ExecutionEnvironment.String(), - Tags: []string{}, - Technical_assets_inside: []string{vaultID, storageID}, - Trust_boundaries_nested: nil, - } - *changeLogCollector = append(*changeLogCollector, "adding 
trust boundary: "+vaultEnvID) - if !dryRun { - modelInput.Trust_boundaries[title] = trustBoundary - } - } - - if withinTrustBoundary { - if createNewTrustBoundary { - trustBoundaryType := macroState["new-trust-boundary-type"][0] - title := "Vault Network" - trustBoundary := model.InputTrustBoundary{ - ID: "vault-network", - Description: "Vault Network", - Type: trustBoundaryType, - Tags: []string{}, - } - if filesystemUsed { - trustBoundary.Trust_boundaries_nested = []string{vaultEnvID} - } else { - trustBoundary.Technical_assets_inside = serverSideTechAssets - } - *changeLogCollector = append(*changeLogCollector, "adding trust boundary: vault-network") - if !dryRun { - modelInput.Trust_boundaries[title] = trustBoundary - } - } else { // adding to existing trust boundary - existingTrustBoundaryToAddTo := macroState["selected-trust-boundary"][0] - title := model.ParsedModelRoot.TrustBoundaries[existingTrustBoundaryToAddTo].Title - - if filesystemUsed { // ---------------------- nest as execution-environment trust boundary ---------------------- - boundariesNested := make([]string, 0) - if modelInput.Trust_boundaries[title].Trust_boundaries_nested != nil { - vals := modelInput.Trust_boundaries[title].Trust_boundaries_nested - for _, val := range vals { - boundariesNested = append(boundariesNested, fmt.Sprintf("%v", val)) - } - } - mergedArrays := make([]string, 0) - for _, val := range boundariesNested { - mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) - } - mergedArrays = append(mergedArrays, vaultEnvID) - *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) - if !dryRun { - tb := modelInput.Trust_boundaries[title] - tb.Trust_boundaries_nested = mergedArrays - modelInput.Trust_boundaries[title] = tb - } - } else { // ---------------------- place assets inside directly ---------------------- - assetsInside := make([]string, 0) - if modelInput.Trust_boundaries[title].Technical_assets_inside != 
nil { - vals := modelInput.Trust_boundaries[title].Technical_assets_inside - for _, val := range vals { - assetsInside = append(assetsInside, fmt.Sprintf("%v", val)) - } - } - mergedArrays := make([]string, 0) - for _, val := range assetsInside { - mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) - } - mergedArrays = append(mergedArrays, serverSideTechAssets...) - *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) - if !dryRun { - tb := modelInput.Trust_boundaries[title] - tb.Technical_assets_inside = mergedArrays - modelInput.Trust_boundaries[title] = tb - } - } - } - } - - return "Changeset valid", true, nil -} diff --git a/macros/built-in/pretty-print/pretty-print-macro.go b/macros/built-in/pretty-print/pretty-print-macro.go deleted file mode 100644 index 64149c57..00000000 --- a/macros/built-in/pretty-print/pretty-print-macro.go +++ /dev/null @@ -1,31 +0,0 @@ -package pretty_print - -import "github.com/threagile/threagile/model" - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "pretty-print", - Title: "Pretty Print", - Description: "This model macro simply reformats the model file in a pretty-print style.", - } -} - -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - return "Cannot go back further", false, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - return []string{"pretty-printing the model file"}, "Changeset valid", true, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - return "Model pretty printing successful", true, nil -} diff --git 
a/macros/built-in/remove-unused-tags/remove-unused-tags-macro.go b/macros/built-in/remove-unused-tags/remove-unused-tags-macro.go deleted file mode 100644 index f0deaec9..00000000 --- a/macros/built-in/remove-unused-tags/remove-unused-tags-macro.go +++ /dev/null @@ -1,75 +0,0 @@ -package remove_unused_tags - -import ( - "github.com/threagile/threagile/model" - "sort" - "strconv" -) - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "remove-unused-tags", - Title: "Remove Unused Tags", - Description: "This model macro simply removes all unused tags from the model file.", - } -} - -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - return "Cannot go back further", false, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - return []string{"remove unused tags from the model file"}, "Changeset valid", true, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - tagUsageMap := make(map[string]bool, 0) - for _, tag := range model.ParsedModelRoot.TagsAvailable { - tagUsageMap[tag] = false // false = tag is not used - } - for _, dA := range model.ParsedModelRoot.DataAssets { - for _, tag := range dA.Tags { - tagUsageMap[tag] = true // true = tag is used - } - } - for _, tA := range model.ParsedModelRoot.TechnicalAssets { - for _, tag := range tA.Tags { - tagUsageMap[tag] = true // true = tag is used - } - for _, cL := range tA.CommunicationLinks { - for _, tag := range cL.Tags { - tagUsageMap[tag] = true // true = tag is used - } - } - } - for _, tB := range model.ParsedModelRoot.TrustBoundaries { - for _, tag := range tB.Tags { - tagUsageMap[tag] 
= true // true = tag is used - } - } - for _, sR := range model.ParsedModelRoot.SharedRuntimes { - for _, tag := range sR.Tags { - tagUsageMap[tag] = true // true = tag is used - } - } - counter := 0 - tagsSorted := make([]string, 0) - for tag, used := range tagUsageMap { - if used { - tagsSorted = append(tagsSorted, tag) - } else { - counter++ - } - } - sort.Strings(tagsSorted) - modelInput.Tags_available = tagsSorted - return "Model file removal of " + strconv.Itoa(counter) + " unused tags successful", true, nil -} diff --git a/macros/built-in/seed-risk-tracking/seed-risk-tracking-macro.go b/macros/built-in/seed-risk-tracking/seed-risk-tracking-macro.go deleted file mode 100644 index 9a64557a..00000000 --- a/macros/built-in/seed-risk-tracking/seed-risk-tracking-macro.go +++ /dev/null @@ -1,54 +0,0 @@ -package seed_risk_tracking - -import ( - "github.com/threagile/threagile/model" - "sort" - "strconv" -) - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "seed-risk-tracking", - Title: "Seed Risk Tracking", - Description: "This model macro simply seeds the model file with initial risk tracking entries for all untracked risks.", - } -} - -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - return "Cannot go back further", false, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - return []string{"seed the model file with with initial risk tracking entries for all untracked risks"}, "Changeset valid", true, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - syntheticRiskIDsToCreateTrackingFor := make([]string, 0) - for id, risk := range 
model.GeneratedRisksBySyntheticId { - if !risk.IsRiskTracked() { - syntheticRiskIDsToCreateTrackingFor = append(syntheticRiskIDsToCreateTrackingFor, id) - } - } - sort.Strings(syntheticRiskIDsToCreateTrackingFor) - if modelInput.Risk_tracking == nil { - modelInput.Risk_tracking = make(map[string]model.InputRiskTracking, 0) - } - for _, id := range syntheticRiskIDsToCreateTrackingFor { - modelInput.Risk_tracking[id] = model.InputRiskTracking{ - Status: model.Unchecked.String(), - Justification: "", - Ticket: "", - Date: "", - Checked_by: "", - } - } - return "Model file seeding with " + strconv.Itoa(len(syntheticRiskIDsToCreateTrackingFor)) + " initial risk tracking successful", true, nil -} diff --git a/macros/built-in/seed-tags/seed-tags-macro.go b/macros/built-in/seed-tags/seed-tags-macro.go deleted file mode 100644 index fc65c414..00000000 --- a/macros/built-in/seed-tags/seed-tags-macro.go +++ /dev/null @@ -1,48 +0,0 @@ -package seed_tags - -import ( - "github.com/threagile/threagile/model" - "sort" - "strconv" -) - -func GetMacroDetails() model.MacroDetails { - return model.MacroDetails{ - ID: "seed-tags", - Title: "Seed Tags", - Description: "This model macro simply seeds the model file with supported tags from all risk rules.", - } -} - -func GetNextQuestion() (nextQuestion model.MacroQuestion, err error) { - return model.NoMoreQuestions(), nil -} - -func ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { - return "Answer processed", true, nil -} - -func GoBack() (message string, validResult bool, err error) { - return "Cannot go back further", false, nil -} - -func GetFinalChangeImpact(modelInput *model.ModelInput) (changes []string, message string, validResult bool, err error) { - return []string{"seed the model file with supported tags from all risk rules"}, "Changeset valid", true, err -} - -func Execute(modelInput *model.ModelInput) (message string, validResult bool, err error) { - tagMap := 
make(map[string]bool, 0) - for k, v := range model.AllSupportedTags { - tagMap[k] = v - } - for _, tagFromModel := range model.ParsedModelRoot.TagsAvailable { - tagMap[tagFromModel] = true - } - tagsSorted := make([]string, 0) - for tag := range tagMap { - tagsSorted = append(tagsSorted, tag) - } - sort.Strings(tagsSorted) - modelInput.Tags_available = tagsSorted - return "Model file seeding with " + strconv.Itoa(len(model.AllSupportedTags)) + " tags successful", true, nil -} diff --git a/main.go b/main.go deleted file mode 100644 index 938047aa..00000000 --- a/main.go +++ /dev/null @@ -1,5760 +0,0 @@ -package main - -import ( - "archive/zip" - "bufio" - "bytes" - "compress/gzip" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "crypto/sha256" - "crypto/sha512" - "encoding/base64" - "encoding/hex" - "errors" - "flag" - "fmt" - "hash/fnv" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "os/exec" - "path/filepath" - "plugin" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/gin-gonic/gin" - "github.com/google/uuid" - "github.com/threagile/threagile/colors" - add_build_pipeline "github.com/threagile/threagile/macros/built-in/add-build-pipeline" - add_vault "github.com/threagile/threagile/macros/built-in/add-vault" - pretty_print "github.com/threagile/threagile/macros/built-in/pretty-print" - remove_unused_tags "github.com/threagile/threagile/macros/built-in/remove-unused-tags" - seed_risk_tracking "github.com/threagile/threagile/macros/built-in/seed-risk-tracking" - seed_tags "github.com/threagile/threagile/macros/built-in/seed-tags" - "github.com/threagile/threagile/model" - "github.com/threagile/threagile/report" - accidental_secret_leak "github.com/threagile/threagile/risks/built-in/accidental-secret-leak" - code_backdooring "github.com/threagile/threagile/risks/built-in/code-backdooring" - container_baseimage_backdooring "github.com/threagile/threagile/risks/built-in/container-baseimage-backdooring" - container_platform_escape 
"github.com/threagile/threagile/risks/built-in/container-platform-escape" - cross_site_request_forgery "github.com/threagile/threagile/risks/built-in/cross-site-request-forgery" - cross_site_scripting "github.com/threagile/threagile/risks/built-in/cross-site-scripting" - dos_risky_access_across_trust_boundary "github.com/threagile/threagile/risks/built-in/dos-risky-access-across-trust-boundary" - incomplete_model "github.com/threagile/threagile/risks/built-in/incomplete-model" - ldap_injection "github.com/threagile/threagile/risks/built-in/ldap-injection" - missing_authentication "github.com/threagile/threagile/risks/built-in/missing-authentication" - missing_authentication_second_factor "github.com/threagile/threagile/risks/built-in/missing-authentication-second-factor" - missing_build_infrastructure "github.com/threagile/threagile/risks/built-in/missing-build-infrastructure" - missing_cloud_hardening "github.com/threagile/threagile/risks/built-in/missing-cloud-hardening" - missing_file_validation "github.com/threagile/threagile/risks/built-in/missing-file-validation" - missing_hardening "github.com/threagile/threagile/risks/built-in/missing-hardening" - missing_identity_propagation "github.com/threagile/threagile/risks/built-in/missing-identity-propagation" - missing_identity_provider_isolation "github.com/threagile/threagile/risks/built-in/missing-identity-provider-isolation" - missing_identity_store "github.com/threagile/threagile/risks/built-in/missing-identity-store" - missing_network_segmentation "github.com/threagile/threagile/risks/built-in/missing-network-segmentation" - missing_vault "github.com/threagile/threagile/risks/built-in/missing-vault" - missing_vault_isolation "github.com/threagile/threagile/risks/built-in/missing-vault-isolation" - missing_waf "github.com/threagile/threagile/risks/built-in/missing-waf" - mixed_targets_on_shared_runtime "github.com/threagile/threagile/risks/built-in/mixed-targets-on-shared-runtime" - path_traversal 
"github.com/threagile/threagile/risks/built-in/path-traversal" - push_instead_of_pull_deployment "github.com/threagile/threagile/risks/built-in/push-instead-of-pull-deployment" - search_query_injection "github.com/threagile/threagile/risks/built-in/search-query-injection" - server_side_request_forgery "github.com/threagile/threagile/risks/built-in/server-side-request-forgery" - service_registry_poisoning "github.com/threagile/threagile/risks/built-in/service-registry-poisoning" - sql_nosql_injection "github.com/threagile/threagile/risks/built-in/sql-nosql-injection" - unchecked_deployment "github.com/threagile/threagile/risks/built-in/unchecked-deployment" - unencrypted_asset "github.com/threagile/threagile/risks/built-in/unencrypted-asset" - unencrypted_communication "github.com/threagile/threagile/risks/built-in/unencrypted-communication" - unguarded_access_from_internet "github.com/threagile/threagile/risks/built-in/unguarded-access-from-internet" - unguarded_direct_datastore_access "github.com/threagile/threagile/risks/built-in/unguarded-direct-datastore-access" - unnecessary_communication_link "github.com/threagile/threagile/risks/built-in/unnecessary-communication-link" - unnecessary_data_asset "github.com/threagile/threagile/risks/built-in/unnecessary-data-asset" - unnecessary_data_transfer "github.com/threagile/threagile/risks/built-in/unnecessary-data-transfer" - unnecessary_technical_asset "github.com/threagile/threagile/risks/built-in/unnecessary-technical-asset" - untrusted_deserialization "github.com/threagile/threagile/risks/built-in/untrusted-deserialization" - wrong_communication_link_content "github.com/threagile/threagile/risks/built-in/wrong-communication-link-content" - wrong_trust_boundary_content "github.com/threagile/threagile/risks/built-in/wrong-trust-boundary-content" - xml_external_entity "github.com/threagile/threagile/risks/built-in/xml-external-entity" - "golang.org/x/crypto/argon2" - "gopkg.in/yaml.v3" -) - -const 
keepDiagramSourceFiles = false -const defaultGraphvizDPI, maxGraphvizDPI = 120, 240 - -const backupHistoryFilesToKeep = 50 - -const baseFolder, reportFilename, excelRisksFilename, excelTagsFilename, jsonRisksFilename, jsonTechnicalAssetsFilename, jsonStatsFilename, dataFlowDiagramFilenameDOT, dataFlowDiagramFilenamePNG, dataAssetDiagramFilenameDOT, dataAssetDiagramFilenamePNG, graphvizDataFlowDiagramConversionCall, graphvizDataAssetDiagramConversionCall = "/data", "report.pdf", "risks.xlsx", "tags.xlsx", "risks.json", "technical-assets.json", "stats.json", "data-flow-diagram.gv", "data-flow-diagram.png", "data-asset-diagram.gv", "data-asset-diagram.png", "render-data-flow-diagram.sh", "render-data-asset-diagram.sh" - -var globalLock sync.Mutex -var successCount, errorCount = 0, 0 - -var modelInput model.ModelInput - -var drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = true - -var buildTimestamp = "" - -var modelFilename, templateFilename /*, diagramFilename, reportFilename, graphvizConversion*/ *string -var createExampleModel, createStubModel, createEditingSupport, verbose, ignoreOrphanedRiskTracking, generateDataFlowDiagram, generateDataAssetDiagram, generateRisksJSON, generateTechnicalAssetsJSON, generateStatsJSON, generateRisksExcel, generateTagsExcel, generateReportPDF *bool -var outputDir, raaPlugin, skipRiskRules, riskRulesPlugins, executeModelMacro *string -var customRiskRules map[string]model.CustomRiskRule -var diagramDPI, serverPort *int - -var deferredRiskTrackingDueToWildcardMatching = make(map[string]model.RiskTracking) - -func applyRiskGeneration() { - if *verbose { - fmt.Println("Applying risk generation") - } - skippedRules := make(map[string]interface{}) - if len(*skipRiskRules) > 0 { - for _, id := range strings.Split(*skipRiskRules, ",") { - skippedRules[id] = true - } - } - - if _, ok := skippedRules[unencrypted_asset.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unencrypted_asset.Category().Id) - delete(skippedRules, 
unencrypted_asset.Category().Id) - } else { - model.AddToListOfSupportedTags(unencrypted_asset.SupportedTags()) - risks := unencrypted_asset.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unencrypted_asset.Category()] = risks - } - } - - if _, ok := skippedRules[unencrypted_communication.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unencrypted_communication.Category().Id) - delete(skippedRules, unencrypted_communication.Category().Id) - } else { - model.AddToListOfSupportedTags(unencrypted_communication.SupportedTags()) - risks := unencrypted_communication.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unencrypted_communication.Category()] = risks - } - } - - if _, ok := skippedRules[unguarded_direct_datastore_access.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unguarded_direct_datastore_access.Category().Id) - delete(skippedRules, unguarded_direct_datastore_access.Category().Id) - } else { - model.AddToListOfSupportedTags(unguarded_direct_datastore_access.SupportedTags()) - risks := unguarded_direct_datastore_access.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unguarded_direct_datastore_access.Category()] = risks - } - } - - if _, ok := skippedRules[unguarded_access_from_internet.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unguarded_access_from_internet.Category().Id) - delete(skippedRules, unguarded_access_from_internet.Category().Id) - } else { - model.AddToListOfSupportedTags(unguarded_access_from_internet.SupportedTags()) - risks := unguarded_access_from_internet.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unguarded_access_from_internet.Category()] = risks - } - } - - if _, ok := skippedRules[dos_risky_access_across_trust_boundary.Category().Id]; ok { - fmt.Println("Skipping risk rule:", dos_risky_access_across_trust_boundary.Category().Id) - delete(skippedRules, dos_risky_access_across_trust_boundary.Category().Id) - } 
else { - model.AddToListOfSupportedTags(dos_risky_access_across_trust_boundary.SupportedTags()) - risks := dos_risky_access_across_trust_boundary.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[dos_risky_access_across_trust_boundary.Category()] = risks - } - } - - if _, ok := skippedRules[missing_network_segmentation.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_network_segmentation.Category().Id) - delete(skippedRules, missing_network_segmentation.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_network_segmentation.SupportedTags()) - risks := missing_network_segmentation.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_network_segmentation.Category()] = risks - } - } - - if _, ok := skippedRules[mixed_targets_on_shared_runtime.Category().Id]; ok { - fmt.Println("Skipping risk rule:", mixed_targets_on_shared_runtime.Category().Id) - delete(skippedRules, mixed_targets_on_shared_runtime.Category().Id) - } else { - model.AddToListOfSupportedTags(mixed_targets_on_shared_runtime.SupportedTags()) - risks := mixed_targets_on_shared_runtime.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[mixed_targets_on_shared_runtime.Category()] = risks - } - } - - if _, ok := skippedRules[missing_identity_propagation.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_identity_propagation.Category().Id) - delete(skippedRules, missing_identity_propagation.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_identity_propagation.SupportedTags()) - risks := missing_identity_propagation.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_identity_propagation.Category()] = risks - } - } - - if _, ok := skippedRules[missing_identity_store.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_identity_store.Category().Id) - delete(skippedRules, missing_identity_store.Category().Id) - } else { - 
model.AddToListOfSupportedTags(missing_identity_store.SupportedTags()) - risks := missing_identity_store.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_identity_store.Category()] = risks - } - } - - if _, ok := skippedRules[missing_authentication.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_authentication.Category().Id) - delete(skippedRules, missing_authentication.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_authentication.SupportedTags()) - risks := missing_authentication.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_authentication.Category()] = risks - } - } - - if _, ok := skippedRules[missing_authentication_second_factor.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_authentication_second_factor.Category().Id) - delete(skippedRules, missing_authentication_second_factor.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_authentication_second_factor.SupportedTags()) - risks := missing_authentication_second_factor.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_authentication_second_factor.Category()] = risks - } - } - - if _, ok := skippedRules[unnecessary_data_transfer.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unnecessary_data_transfer.Category().Id) - delete(skippedRules, unnecessary_data_transfer.Category().Id) - } else { - model.AddToListOfSupportedTags(unnecessary_data_transfer.SupportedTags()) - risks := unnecessary_data_transfer.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unnecessary_data_transfer.Category()] = risks - } - } - - if _, ok := skippedRules[unnecessary_communication_link.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unnecessary_communication_link.Category().Id) - delete(skippedRules, unnecessary_communication_link.Category().Id) - } else { - 
model.AddToListOfSupportedTags(unnecessary_communication_link.SupportedTags()) - risks := unnecessary_communication_link.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unnecessary_communication_link.Category()] = risks - } - } - - if _, ok := skippedRules[unnecessary_technical_asset.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unnecessary_technical_asset.Category().Id) - delete(skippedRules, unnecessary_technical_asset.Category().Id) - } else { - model.AddToListOfSupportedTags(unnecessary_technical_asset.SupportedTags()) - risks := unnecessary_technical_asset.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unnecessary_technical_asset.Category()] = risks - } - } - - if _, ok := skippedRules[unnecessary_data_asset.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unnecessary_data_asset.Category().Id) - delete(skippedRules, unnecessary_data_asset.Category().Id) - } else { - model.AddToListOfSupportedTags(unnecessary_data_asset.SupportedTags()) - risks := unnecessary_data_asset.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unnecessary_data_asset.Category()] = risks - } - } - - if _, ok := skippedRules[sql_nosql_injection.Category().Id]; ok { - fmt.Println("Skipping risk rule:", sql_nosql_injection.Category().Id) - delete(skippedRules, sql_nosql_injection.Category().Id) - } else { - model.AddToListOfSupportedTags(sql_nosql_injection.SupportedTags()) - risks := sql_nosql_injection.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[sql_nosql_injection.Category()] = risks - } - } - - if _, ok := skippedRules[ldap_injection.Category().Id]; ok { - fmt.Println("Skipping risk rule:", ldap_injection.Category().Id) - delete(skippedRules, ldap_injection.Category().Id) - } else { - model.AddToListOfSupportedTags(ldap_injection.SupportedTags()) - risks := ldap_injection.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[ldap_injection.Category()] = 
risks - } - } - - if _, ok := skippedRules[cross_site_scripting.Category().Id]; ok { - fmt.Println("Skipping risk rule:", cross_site_scripting.Category().Id) - delete(skippedRules, cross_site_scripting.Category().Id) - } else { - model.AddToListOfSupportedTags(cross_site_scripting.SupportedTags()) - risks := cross_site_scripting.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[cross_site_scripting.Category()] = risks - } - } - - if _, ok := skippedRules[cross_site_request_forgery.Category().Id]; ok { - fmt.Println("Skipping risk rule:", cross_site_request_forgery.Category().Id) - delete(skippedRules, cross_site_request_forgery.Category().Id) - } else { - model.AddToListOfSupportedTags(cross_site_request_forgery.SupportedTags()) - risks := cross_site_request_forgery.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[cross_site_request_forgery.Category()] = risks - } - } - - if _, ok := skippedRules[server_side_request_forgery.Category().Id]; ok { - fmt.Println("Skipping risk rule:", server_side_request_forgery.Category().Id) - delete(skippedRules, server_side_request_forgery.Category().Id) - } else { - model.AddToListOfSupportedTags(server_side_request_forgery.SupportedTags()) - risks := server_side_request_forgery.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[server_side_request_forgery.Category()] = risks - } - } - - if _, ok := skippedRules[path_traversal.Category().Id]; ok { - fmt.Println("Skipping risk rule:", path_traversal.Category().Id) - delete(skippedRules, path_traversal.Category().Id) - } else { - model.AddToListOfSupportedTags(path_traversal.SupportedTags()) - risks := path_traversal.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[path_traversal.Category()] = risks - } - } - - if _, ok := skippedRules[push_instead_of_pull_deployment.Category().Id]; ok { - fmt.Println("Skipping risk rule:", push_instead_of_pull_deployment.Category().Id) - delete(skippedRules, 
push_instead_of_pull_deployment.Category().Id) - } else { - model.AddToListOfSupportedTags(push_instead_of_pull_deployment.SupportedTags()) - risks := push_instead_of_pull_deployment.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[push_instead_of_pull_deployment.Category()] = risks - } - } - - if _, ok := skippedRules[search_query_injection.Category().Id]; ok { - fmt.Println("Skipping risk rule:", search_query_injection.Category().Id) - delete(skippedRules, search_query_injection.Category().Id) - } else { - model.AddToListOfSupportedTags(search_query_injection.SupportedTags()) - risks := search_query_injection.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[search_query_injection.Category()] = risks - } - } - - if _, ok := skippedRules[service_registry_poisoning.Category().Id]; ok { - fmt.Println("Skipping risk rule:", service_registry_poisoning.Category().Id) - delete(skippedRules, service_registry_poisoning.Category().Id) - } else { - model.AddToListOfSupportedTags(service_registry_poisoning.SupportedTags()) - risks := service_registry_poisoning.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[service_registry_poisoning.Category()] = risks - } - } - - if _, ok := skippedRules[untrusted_deserialization.Category().Id]; ok { - fmt.Println("Skipping risk rule:", untrusted_deserialization.Category().Id) - delete(skippedRules, untrusted_deserialization.Category().Id) - } else { - model.AddToListOfSupportedTags(untrusted_deserialization.SupportedTags()) - risks := untrusted_deserialization.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[untrusted_deserialization.Category()] = risks - } - } - - if _, ok := skippedRules[xml_external_entity.Category().Id]; ok { - fmt.Println("Skipping risk rule:", xml_external_entity.Category().Id) - delete(skippedRules, xml_external_entity.Category().Id) - } else { - model.AddToListOfSupportedTags(xml_external_entity.SupportedTags()) - risks := 
xml_external_entity.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[xml_external_entity.Category()] = risks - } - } - - if _, ok := skippedRules[missing_cloud_hardening.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_cloud_hardening.Category().Id) - delete(skippedRules, missing_cloud_hardening.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_cloud_hardening.SupportedTags()) - risks := missing_cloud_hardening.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_cloud_hardening.Category()] = risks - } - } - - if _, ok := skippedRules[missing_file_validation.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_file_validation.Category().Id) - delete(skippedRules, missing_file_validation.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_file_validation.SupportedTags()) - risks := missing_file_validation.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_file_validation.Category()] = risks - } - } - - if _, ok := skippedRules[missing_hardening.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_hardening.Category().Id) - delete(skippedRules, missing_hardening.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_hardening.SupportedTags()) - risks := missing_hardening.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_hardening.Category()] = risks - } - } - - if _, ok := skippedRules[accidental_secret_leak.Category().Id]; ok { - fmt.Println("Skipping risk rule:", accidental_secret_leak.Category().Id) - delete(skippedRules, accidental_secret_leak.Category().Id) - } else { - model.AddToListOfSupportedTags(accidental_secret_leak.SupportedTags()) - risks := accidental_secret_leak.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[accidental_secret_leak.Category()] = risks - } - } - - if _, ok := skippedRules[code_backdooring.Category().Id]; ok { - 
fmt.Println("Skipping risk rule:", code_backdooring.Category().Id) - delete(skippedRules, code_backdooring.Category().Id) - } else { - model.AddToListOfSupportedTags(code_backdooring.SupportedTags()) - risks := code_backdooring.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[code_backdooring.Category()] = risks - } - } - - if _, ok := skippedRules[container_baseimage_backdooring.Category().Id]; ok { - fmt.Println("Skipping risk rule:", container_baseimage_backdooring.Category().Id) - delete(skippedRules, container_baseimage_backdooring.Category().Id) - } else { - model.AddToListOfSupportedTags(container_baseimage_backdooring.SupportedTags()) - risks := container_baseimage_backdooring.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[container_baseimage_backdooring.Category()] = risks - } - } - - if _, ok := skippedRules[container_platform_escape.Category().Id]; ok { - fmt.Println("Skipping risk rule:", container_platform_escape.Category().Id) - delete(skippedRules, container_platform_escape.Category().Id) - } else { - model.AddToListOfSupportedTags(container_platform_escape.SupportedTags()) - risks := container_platform_escape.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[container_platform_escape.Category()] = risks - } - } - - if _, ok := skippedRules[incomplete_model.Category().Id]; ok { - fmt.Println("Skipping risk rule:", incomplete_model.Category().Id) - delete(skippedRules, incomplete_model.Category().Id) - } else { - model.AddToListOfSupportedTags(incomplete_model.SupportedTags()) - risks := incomplete_model.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[incomplete_model.Category()] = risks - } - } - - if _, ok := skippedRules[unchecked_deployment.Category().Id]; ok { - fmt.Println("Skipping risk rule:", unchecked_deployment.Category().Id) - delete(skippedRules, unchecked_deployment.Category().Id) - } else { - 
model.AddToListOfSupportedTags(unchecked_deployment.SupportedTags()) - risks := unchecked_deployment.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[unchecked_deployment.Category()] = risks - } - } - - if _, ok := skippedRules[missing_build_infrastructure.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_build_infrastructure.Category().Id) - delete(skippedRules, missing_build_infrastructure.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_build_infrastructure.SupportedTags()) - risks := missing_build_infrastructure.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_build_infrastructure.Category()] = risks - } - } - - if _, ok := skippedRules[missing_identity_provider_isolation.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_identity_provider_isolation.Category().Id) - delete(skippedRules, missing_identity_provider_isolation.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_identity_provider_isolation.SupportedTags()) - risks := missing_identity_provider_isolation.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_identity_provider_isolation.Category()] = risks - } - } - - if _, ok := skippedRules[missing_vault.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_vault.Category().Id) - delete(skippedRules, missing_vault.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_vault.SupportedTags()) - risks := missing_vault.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_vault.Category()] = risks - } - } - - if _, ok := skippedRules[missing_vault_isolation.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_vault_isolation.Category().Id) - delete(skippedRules, missing_vault_isolation.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_vault_isolation.SupportedTags()) - risks := missing_vault_isolation.GenerateRisks() - if len(risks) > 0 { - 
model.GeneratedRisksByCategory[missing_vault_isolation.Category()] = risks - } - } - - if _, ok := skippedRules[missing_waf.Category().Id]; ok { - fmt.Println("Skipping risk rule:", missing_waf.Category().Id) - delete(skippedRules, missing_waf.Category().Id) - } else { - model.AddToListOfSupportedTags(missing_waf.SupportedTags()) - risks := missing_waf.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[missing_waf.Category()] = risks - } - } - - if _, ok := skippedRules[wrong_communication_link_content.Category().Id]; ok { - fmt.Println("Skipping risk rule:", wrong_communication_link_content.Category().Id) - delete(skippedRules, wrong_communication_link_content.Category().Id) - } else { - model.AddToListOfSupportedTags(wrong_communication_link_content.SupportedTags()) - risks := wrong_communication_link_content.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[wrong_communication_link_content.Category()] = risks - } - } - - if _, ok := skippedRules[wrong_trust_boundary_content.Category().Id]; ok { - fmt.Println("Skipping risk rule:", wrong_trust_boundary_content.Category().Id) - delete(skippedRules, wrong_trust_boundary_content.Category().Id) - } else { - model.AddToListOfSupportedTags(wrong_trust_boundary_content.SupportedTags()) - risks := wrong_trust_boundary_content.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[wrong_trust_boundary_content.Category()] = risks - } - } - - // NOW THE CUSTOM RISK RULES (if any) - for id, customRule := range customRiskRules { - if _, ok := skippedRules[customRule.Category().Id]; ok { - if *verbose { - fmt.Println("Skipping custom risk rule:", id) - } - delete(skippedRules, id) - } else { - if *verbose { - fmt.Println("Executing custom risk rule:", id) - } - model.AddToListOfSupportedTags(customRule.SupportedTags()) - risks := customRule.GenerateRisks() - if len(risks) > 0 { - model.GeneratedRisksByCategory[customRule.Category()] = risks - } - if *verbose { - 
fmt.Println("Added custom risks:", len(risks)) - } - } - } - - if len(skippedRules) > 0 { - keys := make([]string, 0) - for k := range skippedRules { - keys = append(keys, k) - } - if len(keys) > 0 { - log.Println("Unknown risk rules to skip:", keys) - } - } - - // save also in map keyed by synthetic risk-id - for _, category := range model.SortedRiskCategories() { - risks := model.SortedRisksOfCategory(category) - for _, risk := range risks { - model.GeneratedRisksBySyntheticId[strings.ToLower(risk.SyntheticId)] = risk - } - } -} - -func checkRiskTracking() { - if *verbose { - fmt.Println("Checking risk tracking") - } - for _, tracking := range model.ParsedModelRoot.RiskTracking { - if _, ok := model.GeneratedRisksBySyntheticId[tracking.SyntheticRiskId]; !ok { - if *ignoreOrphanedRiskTracking { - fmt.Println("Risk tracking references unknown risk (risk id not found): " + tracking.SyntheticRiskId) - } else { - panic(errors.New("Risk tracking references unknown risk (risk id not found) - you might want to use the option -ignore-orphaned-risk-tracking: " + tracking.SyntheticRiskId + - "\n\nNOTE: For risk tracking each risk-id needs to be defined (the string with the @ sign in it). " + - "These unique risk IDs are visible in the PDF report (the small grey string under each risk), " + - "the Excel (column \"ID\"), as well as the JSON responses. Some risk IDs have only one @ sign in them, " + - "while others multiple. The idea is to allow for unique but still speaking IDs. Therefore each risk instance " + - "creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. " + - "Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. " + - "Best is to lookup the IDs to use in the created Excel file. 
Alternatively a model macro \"seed-risk-tracking\" " + - "is available that helps in initially seeding the risk tracking part here based on already identified and not yet handled risks.")) - } - } - } - - // save also the risk-category-id and risk-status directly in the risk for better JSON marshalling - for category, _ := range model.GeneratedRisksByCategory { - for i, _ := range model.GeneratedRisksByCategory[category] { - model.GeneratedRisksByCategory[category][i].CategoryId = category.Id - model.GeneratedRisksByCategory[category][i].RiskStatus = model.GeneratedRisksByCategory[category][i].GetRiskTrackingStatusDefaultingUnchecked() - } - } -} - -// === Error handling stuff ======================================== - -func checkErr(err error) { - if err != nil { - panic(err) - } -} - -func main() { - parseCommandlineArgs() - if *serverPort > 0 { - startServer() - } else { - doIt(*modelFilename, *outputDir) - } -} - -// Unzip will decompress a zip archive, moving all files and folders -// within the zip file (parameter 1) to an output directory (parameter 2). -func unzip(src string, dest string) ([]string, error) { - var filenames []string - - r, err := zip.OpenReader(src) - if err != nil { - return filenames, err - } - defer r.Close() - - for _, f := range r.File { - // Store filename/path for returning and using later on - fpath := filepath.Join(dest, f.Name) - // Check for ZipSlip. 
More Info: http://bit.ly/2MsjAWE - if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { - return filenames, fmt.Errorf("%s: illegal file path", fpath) - } - filenames = append(filenames, fpath) - if f.FileInfo().IsDir() { - // Make Folder - os.MkdirAll(fpath, os.ModePerm) - continue - } - // Make File - if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil { - return filenames, err - } - outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) - if err != nil { - return filenames, err - } - rc, err := f.Open() - if err != nil { - return filenames, err - } - _, err = io.Copy(outFile, rc) - // Close the file without defer to close before next iteration of loop - outFile.Close() - rc.Close() - if err != nil { - return filenames, err - } - } - return filenames, nil -} - -// ZipFiles compresses one or many files into a single zip archive file. -// Param 1: filename is the output zip file's name. -// Param 2: files is a list of files to add to the zip. -func zipFiles(filename string, files []string) error { - newZipFile, err := os.Create(filename) - if err != nil { - return err - } - defer newZipFile.Close() - - zipWriter := zip.NewWriter(newZipFile) - defer zipWriter.Close() - - // Add files to zip - for _, file := range files { - if err = addFileToZip(zipWriter, file); err != nil { - return err - } - } - return nil -} - -func addFileToZip(zipWriter *zip.Writer, filename string) error { - fileToZip, err := os.Open(filename) - if err != nil { - return err - } - defer fileToZip.Close() - - // Get the file information - info, err := fileToZip.Stat() - if err != nil { - return err - } - - header, err := zip.FileInfoHeader(info) - if err != nil { - return err - } - - // Using FileInfoHeader() above only uses the basename of the file. If we want - // to preserve the folder structure we can overwrite this with the full path. 
- //header.Name = filename - - // Change to deflate to gain better compression - // see http://golang.org/pkg/archive/zip/#pkg-constants - header.Method = zip.Deflate - - writer, err := zipWriter.CreateHeader(header) - if err != nil { - return err - } - _, err = io.Copy(writer, fileToZip) - return err -} - -func doIt(inputFilename string, outputDirectory string) { - defer func() { - var err error - if r := recover(); r != nil { - err = r.(error) - if *verbose { - log.Println(err) - } - os.Stderr.WriteString(err.Error() + "\n") - os.Exit(2) - } - }() - if len(*executeModelMacro) > 0 { - printLogo() - } else { - if *verbose { - fmt.Println("Writing into output directory:", outputDirectory) - } - } - - model.Init() - parseModel(inputFilename) - introTextRAA := applyRAA() - loadCustomRiskRules() - applyRiskGeneration() - applyWildcardRiskTrackingEvaluation() - checkRiskTracking() - - if len(*executeModelMacro) > 0 { - var macroDetails model.MacroDetails - switch *executeModelMacro { - case add_build_pipeline.GetMacroDetails().ID: - macroDetails = add_build_pipeline.GetMacroDetails() - case add_vault.GetMacroDetails().ID: - macroDetails = add_vault.GetMacroDetails() - case pretty_print.GetMacroDetails().ID: - macroDetails = pretty_print.GetMacroDetails() - case remove_unused_tags.GetMacroDetails().ID: - macroDetails = remove_unused_tags.GetMacroDetails() - case seed_risk_tracking.GetMacroDetails().ID: - macroDetails = seed_risk_tracking.GetMacroDetails() - case seed_tags.GetMacroDetails().ID: - macroDetails = seed_tags.GetMacroDetails() - default: - log.Fatal("Unknown model macro: ", *executeModelMacro) - } - fmt.Println("Executing model macro:", macroDetails.ID) - fmt.Println() - fmt.Println() - printBorder(len(macroDetails.Title), true) - fmt.Println(macroDetails.Title) - printBorder(len(macroDetails.Title), true) - if len(macroDetails.Description) > 0 { - fmt.Println(macroDetails.Description) - } - fmt.Println() - reader := bufio.NewReader(os.Stdin) - var err error - 
var nextQuestion model.MacroQuestion - for { - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - nextQuestion, err = add_build_pipeline.GetNextQuestion() - case add_vault.GetMacroDetails().ID: - nextQuestion, err = add_vault.GetNextQuestion() - case pretty_print.GetMacroDetails().ID: - nextQuestion, err = pretty_print.GetNextQuestion() - case remove_unused_tags.GetMacroDetails().ID: - nextQuestion, err = remove_unused_tags.GetNextQuestion() - case seed_risk_tracking.GetMacroDetails().ID: - nextQuestion, err = seed_risk_tracking.GetNextQuestion() - case seed_tags.GetMacroDetails().ID: - nextQuestion, err = seed_tags.GetNextQuestion() - } - checkErr(err) - if nextQuestion.NoMoreQuestions() { - break - } - fmt.Println() - printBorder(len(nextQuestion.Title), false) - fmt.Println(nextQuestion.Title) - printBorder(len(nextQuestion.Title), false) - if len(nextQuestion.Description) > 0 { - fmt.Println(nextQuestion.Description) - } - resultingMultiValueSelection := make([]string, 0) - if nextQuestion.IsValueConstrained() { - if nextQuestion.MultiSelect { - selectedValues := make(map[string]bool, 0) - for { - fmt.Println("Please select (multiple executions possible) from the following values (use number to select/deselect):") - fmt.Println(" 0:", "SELECTION PROCESS FINISHED: CONTINUE TO NEXT QUESTION") - for i, val := range nextQuestion.PossibleAnswers { - number := i + 1 - padding, selected := "", " " - if number < 10 { - padding = " " - } - if val, exists := selectedValues[val]; exists && val { - selected = "*" - } - fmt.Println(" "+selected+" "+padding+strconv.Itoa(number)+":", val) - } - fmt.Println() - fmt.Print("Enter number to select/deselect (or 0 when finished): ") - answer, err := reader.ReadString('\n') - // convert CRLF to LF - answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) - checkErr(err) - if val, err := strconv.Atoi(answer); err == nil { // flip selection - if val == 0 { - for key, selected := range selectedValues { 
- if selected { - resultingMultiValueSelection = append(resultingMultiValueSelection, key) - } - } - break - } else if val > 0 && val <= len(nextQuestion.PossibleAnswers) { - selectedValues[nextQuestion.PossibleAnswers[val-1]] = !selectedValues[nextQuestion.PossibleAnswers[val-1]] - } - } - } - } else { - fmt.Println("Please choose from the following values (enter value directly or use number):") - for i, val := range nextQuestion.PossibleAnswers { - number := i + 1 - padding := "" - if number < 10 { - padding = " " - } - fmt.Println(" "+padding+strconv.Itoa(number)+":", val) - } - } - } - message := "" - validResult := true - if !nextQuestion.IsValueConstrained() || !nextQuestion.MultiSelect { - fmt.Println() - fmt.Println("Enter your answer (use 'BACK' to go one step back or 'QUIT' to quit without executing the model macro)") - fmt.Print("Answer") - if len(nextQuestion.DefaultAnswer) > 0 { - fmt.Print(" (default '" + nextQuestion.DefaultAnswer + "')") - } - fmt.Print(": ") - answer, err := reader.ReadString('\n') - // convert CRLF to LF - answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) - checkErr(err) - if len(answer) == 0 && len(nextQuestion.DefaultAnswer) > 0 { // accepting the default - answer = nextQuestion.DefaultAnswer - } else if nextQuestion.IsValueConstrained() { // convert number to value - if val, err := strconv.Atoi(answer); err == nil { - if val > 0 && val <= len(nextQuestion.PossibleAnswers) { - answer = nextQuestion.PossibleAnswers[val-1] - } - } - } - if strings.ToLower(answer) == "quit" { - fmt.Println("Quitting without executing the model macro") - return - } else if strings.ToLower(answer) == "back" { - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - message, validResult, err = add_build_pipeline.GoBack() - case add_vault.GetMacroDetails().ID: - message, validResult, err = add_vault.GoBack() - case pretty_print.GetMacroDetails().ID: - message, validResult, err = pretty_print.GoBack() - case 
remove_unused_tags.GetMacroDetails().ID: - message, validResult, err = remove_unused_tags.GoBack() - case seed_risk_tracking.GetMacroDetails().ID: - message, validResult, err = seed_risk_tracking.GoBack() - case seed_tags.GetMacroDetails().ID: - message, validResult, err = seed_tags.GoBack() - } - } else if len(answer) > 0 { // individual answer - if nextQuestion.IsValueConstrained() { - if !nextQuestion.IsMatchingValueConstraint(answer) { - fmt.Println() - fmt.Println(">>> INVALID <<<") - fmt.Println("Answer does not match any allowed value. Please try again:") - continue - } - } - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - message, validResult, err = add_build_pipeline.ApplyAnswer(nextQuestion.ID, answer) - case add_vault.GetMacroDetails().ID: - message, validResult, err = add_vault.ApplyAnswer(nextQuestion.ID, answer) - case pretty_print.GetMacroDetails().ID: - message, validResult, err = pretty_print.ApplyAnswer(nextQuestion.ID, answer) - case remove_unused_tags.GetMacroDetails().ID: - message, validResult, err = remove_unused_tags.ApplyAnswer(nextQuestion.ID, answer) - case seed_risk_tracking.GetMacroDetails().ID: - message, validResult, err = seed_risk_tracking.ApplyAnswer(nextQuestion.ID, answer) - case seed_tags.GetMacroDetails().ID: - message, validResult, err = seed_tags.ApplyAnswer(nextQuestion.ID, answer) - } - } - } else { - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - message, validResult, err = add_build_pipeline.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) - case add_vault.GetMacroDetails().ID: - message, validResult, err = add_vault.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) - case pretty_print.GetMacroDetails().ID: - message, validResult, err = pretty_print.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) 
- case remove_unused_tags.GetMacroDetails().ID: - message, validResult, err = remove_unused_tags.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) - case seed_risk_tracking.GetMacroDetails().ID: - message, validResult, err = seed_risk_tracking.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) - case seed_tags.GetMacroDetails().ID: - message, validResult, err = seed_tags.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) - } - } - checkErr(err) - if !validResult { - fmt.Println() - fmt.Println(">>> INVALID <<<") - } - fmt.Println(message) - fmt.Println() - } - for { - fmt.Println() - fmt.Println() - fmt.Println("#################################################################") - fmt.Println("Do you want to execute the model macro (updating the model file)?") - fmt.Println("#################################################################") - fmt.Println() - fmt.Println("The following changes will be applied:") - var changes []string - message := "" - validResult := true - var err error - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - changes, message, validResult, err = add_build_pipeline.GetFinalChangeImpact(&modelInput) - case add_vault.GetMacroDetails().ID: - changes, message, validResult, err = add_vault.GetFinalChangeImpact(&modelInput) - case pretty_print.GetMacroDetails().ID: - changes, message, validResult, err = pretty_print.GetFinalChangeImpact(&modelInput) - case remove_unused_tags.GetMacroDetails().ID: - changes, message, validResult, err = remove_unused_tags.GetFinalChangeImpact(&modelInput) - case seed_risk_tracking.GetMacroDetails().ID: - changes, message, validResult, err = seed_risk_tracking.GetFinalChangeImpact(&modelInput) - case seed_tags.GetMacroDetails().ID: - changes, message, validResult, err = seed_tags.GetFinalChangeImpact(&modelInput) - } - checkErr(err) - for _, change := range changes { - fmt.Println(" -", change) - } - if !validResult { - fmt.Println() - fmt.Println(">>> 
INVALID <<<") - } - fmt.Println() - fmt.Println(message) - fmt.Println() - fmt.Print("Apply these changes to the model file?\nType Yes or No: ") - answer, err := reader.ReadString('\n') - // convert CRLF to LF - answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) - checkErr(err) - answer = strings.ToLower(answer) - fmt.Println() - if answer == "yes" || answer == "y" { - message := "" - validResult := true - var err error - switch macroDetails.ID { - case add_build_pipeline.GetMacroDetails().ID: - message, validResult, err = add_build_pipeline.Execute(&modelInput) - case add_vault.GetMacroDetails().ID: - message, validResult, err = add_vault.Execute(&modelInput) - case pretty_print.GetMacroDetails().ID: - message, validResult, err = pretty_print.Execute(&modelInput) - case remove_unused_tags.GetMacroDetails().ID: - message, validResult, err = remove_unused_tags.Execute(&modelInput) - case seed_risk_tracking.GetMacroDetails().ID: - message, validResult, err = seed_risk_tracking.Execute(&modelInput) - case seed_tags.GetMacroDetails().ID: - message, validResult, err = seed_tags.Execute(&modelInput) - } - checkErr(err) - if !validResult { - fmt.Println() - fmt.Println(">>> INVALID <<<") - } - fmt.Println(message) - fmt.Println() - backupFilename := inputFilename + ".backup" - fmt.Println("Creating backup model file:", backupFilename) // TODO add random files in /dev/shm space? 
- _, err = copyFile(inputFilename, backupFilename) - checkErr(err) - fmt.Println("Updating model") - yamlBytes, err := yaml.Marshal(modelInput) - checkErr(err) - /* - yamlBytes = model.ReformatYAML(yamlBytes) - */ - fmt.Println("Writing model file:", inputFilename) - err = ioutil.WriteFile(inputFilename, yamlBytes, 0400) - checkErr(err) - fmt.Println("Model file successfully updated") - return - } else if answer == "no" || answer == "n" { - fmt.Println("Quitting without executing the model macro") - return - } - } - fmt.Println() - return - } - - renderDataFlowDiagram, renderDataAssetDiagram, renderRisksJSON, renderTechnicalAssetsJSON, renderStatsJSON, renderRisksExcel, renderTagsExcel, renderPDF := *generateDataFlowDiagram, *generateDataAssetDiagram, *generateRisksJSON, *generateTechnicalAssetsJSON, *generateStatsJSON, *generateRisksExcel, *generateTagsExcel, *generateReportPDF - if renderPDF { // as the PDF report includes both diagrams - renderDataFlowDiagram, renderDataAssetDiagram = true, true - } - - // Data-flow Diagram rendering - if renderDataFlowDiagram { - gvFile := outputDirectory + "/" + dataFlowDiagramFilenameDOT - if !keepDiagramSourceFiles { - tmpFileGV, err := ioutil.TempFile(model.TempFolder, dataFlowDiagramFilenameDOT) - checkErr(err) - gvFile = tmpFileGV.Name() - defer os.Remove(gvFile) - } - dotFile := writeDataFlowDiagramGraphvizDOT(gvFile, *diagramDPI) - renderDataFlowDiagramGraphvizImage(dotFile, outputDirectory) - } - // Data Asset Diagram rendering - if renderDataAssetDiagram { - gvFile := outputDirectory + "/" + dataAssetDiagramFilenameDOT - if !keepDiagramSourceFiles { - tmpFile, err := ioutil.TempFile(model.TempFolder, dataAssetDiagramFilenameDOT) - checkErr(err) - gvFile = tmpFile.Name() - defer os.Remove(gvFile) - } - dotFile := writeDataAssetDiagramGraphvizDOT(gvFile, *diagramDPI) - renderDataAssetDiagramGraphvizImage(dotFile, outputDirectory) - } - - // risks as risks json - if renderRisksJSON { - if *verbose { - 
fmt.Println("Writing risks json") - } - report.WriteRisksJSON(outputDirectory + "/" + jsonRisksFilename) - } - - // technical assets json - if renderTechnicalAssetsJSON { - if *verbose { - fmt.Println("Writing technical assets json") - } - report.WriteTechnicalAssetsJSON(outputDirectory + "/" + jsonTechnicalAssetsFilename) - } - - // risks as risks json - if renderStatsJSON { - if *verbose { - fmt.Println("Writing stats json") - } - report.WriteStatsJSON(outputDirectory + "/" + jsonStatsFilename) - } - - // risks Excel - if renderRisksExcel { - if *verbose { - fmt.Println("Writing risks excel") - } - report.WriteRisksExcelToFile(outputDirectory + "/" + excelRisksFilename) - } - - // tags Excel - if renderTagsExcel { - if *verbose { - fmt.Println("Writing tags excel") - } - report.WriteTagsExcelToFile(outputDirectory + "/" + excelTagsFilename) - } - - if renderPDF { - // hash the YAML input file - f, err := os.Open(inputFilename) - checkErr(err) - defer f.Close() - hasher := sha256.New() - if _, err := io.Copy(hasher, f); err != nil { - panic(err) - } - modelHash := hex.EncodeToString(hasher.Sum(nil)) - // report PDF - if *verbose { - fmt.Println("Writing report pdf") - } - report.WriteReportPDF(outputDirectory+"/"+reportFilename, - *templateFilename, - outputDirectory+"/"+dataFlowDiagramFilenamePNG, - outputDirectory+"/"+dataAssetDiagramFilenamePNG, - inputFilename, - *skipRiskRules, - buildTimestamp, - modelHash, - introTextRAA, - customRiskRules) - } -} - -func printBorder(length int, bold bool) { - char := "-" - if bold { - char = "=" - } - for i := 1; i <= length; i++ { - fmt.Print(char) - } - fmt.Println() -} - -func applyRAA() string { - if *verbose { - fmt.Println("Applying RAA calculation:", *raaPlugin) - } - // determine plugin to load - // load plugin: open the ".so" file to load the symbols - plug, err := plugin.Open(*raaPlugin) - checkErr(err) - // look up a symbol (an exported function or variable): in this case, function CalculateRAA - 
symCalculateRAA, err := plug.Lookup("CalculateRAA") - checkErr(err) - // use the plugin - raaCalcFunc, ok := symCalculateRAA.(func() string) // symCalculateRAA.(func(model.ParsedModel) string) - if !ok { - panic(errors.New("RAA plugin has no 'CalculateRAA() string' function")) - } - // call it - return raaCalcFunc() -} - -func loadCustomRiskRules() { - customRiskRules = make(map[string]model.CustomRiskRule, 0) - if len(*riskRulesPlugins) > 0 { - if *verbose { - fmt.Println("Loading custom risk rules:", *riskRulesPlugins) - } - for _, pluginFile := range strings.Split(*riskRulesPlugins, ",") { - if len(pluginFile) > 0 { - // check that the plugin file to load exists - _, err := os.Stat(pluginFile) - if os.IsNotExist(err) { - log.Fatal("Custom risk rule implementation file not found: ", pluginFile) - } - // load plugin: open the ".so" file to load the symbols - plug, err := plugin.Open(pluginFile) - checkErr(err) - // look up a symbol (an exported function or variable): in this case variable CustomRiskRule - symCustomRiskRule, err := plug.Lookup("CustomRiskRule") - checkErr(err) - // register the risk rule plugin for later use: in this case interface type model.CustomRiskRule (defined above) - symCustomRiskRuleVar, ok := symCustomRiskRule.(model.CustomRiskRule) - if !ok { - panic(errors.New("custom risk rule plugin has no 'CustomRiskRule' variable")) - } - // simply add to a map (just convenience) where key is the category id and value the rule's execution function - ruleID := symCustomRiskRuleVar.Category().Id - customRiskRules[ruleID] = symCustomRiskRuleVar - if *verbose { - fmt.Println("Custom risk rule loaded:", ruleID) - } - } - } - if *verbose { - fmt.Println("Loaded custom risk rules:", len(customRiskRules)) - } - } -} - -var validIdSyntax = regexp.MustCompile(`^[a-zA-Z0-9\-]+$`) - -func checkIdSyntax(id string) { - if !validIdSyntax.MatchString(id) { - panic(errors.New("invalid id syntax used (only letters, numbers, and hyphen allowed): " + id)) - } -} - 
-func analyze(context *gin.Context) { - execute(context, false) -} -func check(context *gin.Context) { - _, ok := execute(context, true) - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "model is ok", - }) - } -} - -func execute(context *gin.Context, dryRun bool) (yamlContent []byte, ok bool) { - defer func() { - var err error - if r := recover(); r != nil { - errorCount++ - err = r.(error) - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": strings.TrimSpace(err.Error()), - }) - ok = false - } - }() - - dpi, err := strconv.Atoi(context.DefaultQuery("dpi", strconv.Itoa(defaultGraphvizDPI))) - checkErr(err) - - fileUploaded, header, err := context.Request.FormFile("file") - checkErr(err) - - if header.Size > 50000000 { - msg := "maximum model upload file size exceeded (denial-of-service protection)" - log.Println(msg) - context.JSON(http.StatusRequestEntityTooLarge, gin.H{ - "error": msg, - }) - return yamlContent, false - } - - filenameUploaded := strings.TrimSpace(header.Filename) - - tmpInputDir, err := ioutil.TempDir(model.TempFolder, "threagile-input-") - checkErr(err) - defer os.RemoveAll(tmpInputDir) - - tmpModelFile, err := ioutil.TempFile(tmpInputDir, "threagile-model-*") - checkErr(err) - defer os.Remove(tmpModelFile.Name()) - _, err = io.Copy(tmpModelFile, fileUploaded) - checkErr(err) - - yamlFile := tmpModelFile.Name() - - if strings.ToLower(filepath.Ext(filenameUploaded)) == ".zip" { - // unzip first (including the resources like images etc.) 
- if *verbose { - fmt.Println("Decompressing uploaded archive") - } - filenamesUnzipped, err := unzip(tmpModelFile.Name(), tmpInputDir) - checkErr(err) - found := false - for _, name := range filenamesUnzipped { - if strings.ToLower(filepath.Ext(name)) == ".yaml" { - yamlFile = name - found = true - break - } - } - if !found { - panic(errors.New("no yaml file found in uploaded archive")) - } - } - - tmpOutputDir, err := ioutil.TempDir(model.TempFolder, "threagile-output-") - checkErr(err) - defer os.RemoveAll(tmpOutputDir) - - tmpResultFile, err := ioutil.TempFile(model.TempFolder, "threagile-result-*.zip") - checkErr(err) - defer os.Remove(tmpResultFile.Name()) - - if dryRun { - doItViaRuntimeCall(yamlFile, tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, false, false, true, true, true, 40) - } else { - doItViaRuntimeCall(yamlFile, tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, true, true, true, true, true, true, true, true, dpi) - } - checkErr(err) - - yamlContent, err = ioutil.ReadFile(yamlFile) - checkErr(err) - err = ioutil.WriteFile(tmpOutputDir+"/threagile.yaml", yamlContent, 0400) - checkErr(err) - - if !dryRun { - files := []string{ - tmpOutputDir + "/threagile.yaml", - tmpOutputDir + "/" + dataFlowDiagramFilenamePNG, - tmpOutputDir + "/" + dataAssetDiagramFilenamePNG, - tmpOutputDir + "/" + reportFilename, - tmpOutputDir + "/" + excelRisksFilename, - tmpOutputDir + "/" + excelTagsFilename, - tmpOutputDir + "/" + jsonRisksFilename, - tmpOutputDir + "/" + jsonTechnicalAssetsFilename, - tmpOutputDir + "/" + jsonStatsFilename, - } - if keepDiagramSourceFiles { - files = append(files, tmpOutputDir+"/"+dataFlowDiagramFilenameDOT) - files = append(files, tmpOutputDir+"/"+dataAssetDiagramFilenameDOT) - } - err = zipFiles(tmpResultFile.Name(), files) - checkErr(err) - if *verbose { - log.Println("Streaming back result 
file: " + tmpResultFile.Name()) - } - context.FileAttachment(tmpResultFile.Name(), "threagile-result.zip") - } - successCount++ - return yamlContent, true -} - -// ultimately to avoid any in-process memory and/or data leaks by the used third party libs like PDF generation: exec and quit -func doItViaRuntimeCall(modelFile string, outputDir string, executeModelMacro string, raaPlugin string, customRiskRulesPlugins string, skipRiskRules string, ignoreOrphanedRiskTracking bool, - generateDataFlowDiagram, generateDataAssetDiagram, generateReportPdf, generateRisksExcel, generateTagsExcel, generateRisksJSON, generateTechnicalAssetsJSON, generateStatsJSON bool, - dpi int) { - // Remember to also add the same args to the exec based sub-process calls! - var cmd *exec.Cmd - args := []string{"-model", modelFile, "-output", outputDir, "-execute-model-macro", executeModelMacro, "-raa-plugin", raaPlugin, "-custom-risk-rules-plugins", customRiskRulesPlugins, "-skip-risk-rules", skipRiskRules, "-diagram-dpi", strconv.Itoa(dpi)} - if *verbose { - args = append(args, "-verbose") - } - if ignoreOrphanedRiskTracking { // TODO why add all them as arguments, when they are also variables on outer level? - args = append(args, "-ignore-orphaned-risk-tracking") - } - if generateDataFlowDiagram { - args = append(args, "-generate-data-flow-diagram") - } - if generateDataAssetDiagram { - args = append(args, "-generate-data-asset-diagram") - } - if generateReportPdf { - args = append(args, "-generate-report-pdf") - } - if generateRisksExcel { - args = append(args, "-generate-risks-excel") - } - if generateTagsExcel { - args = append(args, "-generate-tags-excel") - } - if generateRisksJSON { - args = append(args, "-generate-risks-json") - } - if generateTechnicalAssetsJSON { - args = append(args, "-generate-technical-assets-json") - } - if generateStatsJSON { - args = append(args, "-generate-stats-json") - } - self := os.Args[0] - cmd = exec.Command(self, args...) 
- out, err := cmd.CombinedOutput() - if err != nil { - panic(errors.New(string(out))) - } else { - if *verbose && len(out) > 0 { - fmt.Println("---") - fmt.Print(string(out)) - fmt.Println("---") - } - } -} - -func startServer() { - router := gin.Default() - router.LoadHTMLGlob("server/static/*.html") - router.GET("/", func(c *gin.Context) { - c.HTML(http.StatusOK, "index.html", gin.H{}) - }) - router.HEAD("/", func(c *gin.Context) { - c.HTML(http.StatusOK, "index.html", gin.H{}) - }) - router.StaticFile("/threagile.png", "server/static/threagile.png") - router.StaticFile("/site.webmanifest", "server/static/site.webmanifest") - router.StaticFile("/favicon.ico", "server/static/favicon.ico") - router.StaticFile("/favicon-32x32.png", "server/static/favicon-32x32.png") - router.StaticFile("/favicon-16x16.png", "server/static/favicon-16x16.png") - router.StaticFile("/apple-touch-icon.png", "server/static/apple-touch-icon.png") - router.StaticFile("/android-chrome-512x512.png", "server/static/android-chrome-512x512.png") - router.StaticFile("/android-chrome-192x192.png", "server/static/android-chrome-192x192.png") - - router.StaticFile("/schema.json", "schema.json") - router.StaticFile("/live-templates.txt", "live-templates.txt") - router.StaticFile("/openapi.yaml", "openapi.yaml") - router.StaticFile("/swagger-ui/", "server/static/swagger-ui/index.html") - router.StaticFile("/swagger-ui/index.html", "server/static/swagger-ui/index.html") - router.StaticFile("/swagger-ui/oauth2-redirect.html", "server/static/swagger-ui/oauth2-redirect.html") - router.StaticFile("/swagger-ui/swagger-ui.css", "server/static/swagger-ui/swagger-ui.css") - router.StaticFile("/swagger-ui/swagger-ui.js", "server/static/swagger-ui/swagger-ui.js") - router.StaticFile("/swagger-ui/swagger-ui-bundle.js", "server/static/swagger-ui/swagger-ui-bundle.js") - router.StaticFile("/swagger-ui/swagger-ui-standalone-preset.js", "server/static/swagger-ui/swagger-ui-standalone-preset.js") - - 
router.GET("/threagile-example-model.yaml", exampleFile) - router.GET("/threagile-stub-model.yaml", stubFile) - - router.GET("/meta/ping", func(c *gin.Context) { - c.JSON(200, gin.H{ - "message": "pong", - }) - }) - router.GET("/meta/version", func(c *gin.Context) { - c.JSON(200, gin.H{ - "version": model.ThreagileVersion, - "build_timestamp": buildTimestamp, - }) - }) - router.GET("/meta/types", func(c *gin.Context) { - c.JSON(200, gin.H{ - "quantity": arrayOfStringValues(model.QuantityValues()), - "confidentiality": arrayOfStringValues(model.ConfidentialityValues()), - "criticality": arrayOfStringValues(model.CriticalityValues()), - "technical_asset_type": arrayOfStringValues(model.TechnicalAssetTypeValues()), - "technical_asset_size": arrayOfStringValues(model.TechnicalAssetSizeValues()), - "authorization": arrayOfStringValues(model.AuthorizationValues()), - "authentication": arrayOfStringValues(model.AuthenticationValues()), - "usage": arrayOfStringValues(model.UsageValues()), - "encryption": arrayOfStringValues(model.EncryptionStyleValues()), - "data_format": arrayOfStringValues(model.DataFormatValues()), - "protocol": arrayOfStringValues(model.ProtocolValues()), - "technical_asset_technology": arrayOfStringValues(model.TechnicalAssetTechnologyValues()), - "technical_asset_machine": arrayOfStringValues(model.TechnicalAssetMachineValues()), - "trust_boundary_type": arrayOfStringValues(model.TrustBoundaryTypeValues()), - "data_breach_probability": arrayOfStringValues(model.DataBreachProbabilityValues()), - "risk_severity": arrayOfStringValues(model.RiskSeverityValues()), - "risk_exploitation_likelihood": arrayOfStringValues(model.RiskExploitationLikelihoodValues()), - "risk_exploitation_impact": arrayOfStringValues(model.RiskExploitationImpactValues()), - "risk_function": arrayOfStringValues(model.RiskFunctionValues()), - "risk_status": arrayOfStringValues(model.RiskStatusValues()), - "stride": arrayOfStringValues(model.STRIDEValues()), - }) - }) - - // TODO 
router.GET("/meta/risk-rules", listRiskRules) - // TODO router.GET("/meta/model-macros", listModelMacros) - - router.GET("/meta/stats", stats) - - router.POST("/direct/analyze", analyze) - router.POST("/direct/check", check) - router.GET("/direct/stub", stubFile) - - router.POST("/auth/keys", createKey) - router.DELETE("/auth/keys", deleteKey) - router.POST("/auth/tokens", createToken) - router.DELETE("/auth/tokens", deleteToken) - - router.POST("/models", createNewModel) - router.GET("/models", listModels) - router.DELETE("/models/:model-id", deleteModel) - router.GET("/models/:model-id", getModel) - router.PUT("/models/:model-id", importModel) - router.GET("/models/:model-id/data-flow-diagram", streamDataFlowDiagram) - router.GET("/models/:model-id/data-asset-diagram", streamDataAssetDiagram) - router.GET("/models/:model-id/report-pdf", streamReportPDF) - router.GET("/models/:model-id/risks-excel", streamRisksExcel) - router.GET("/models/:model-id/tags-excel", streamTagsExcel) - router.GET("/models/:model-id/risks", streamRisksJSON) - router.GET("/models/:model-id/technical-assets", streamTechnicalAssetsJSON) - router.GET("/models/:model-id/stats", streamStatsJSON) - router.GET("/models/:model-id/analysis", analyzeModelOnServerDirectly) - - router.GET("/models/:model-id/cover", getCover) - router.PUT("/models/:model-id/cover", setCover) - router.GET("/models/:model-id/overview", getOverview) - router.PUT("/models/:model-id/overview", setOverview) - //router.GET("/models/:model-id/questions", getQuestions) - //router.PUT("/models/:model-id/questions", setQuestions) - router.GET("/models/:model-id/abuse-cases", getAbuseCases) - router.PUT("/models/:model-id/abuse-cases", setAbuseCases) - router.GET("/models/:model-id/security-requirements", getSecurityRequirements) - router.PUT("/models/:model-id/security-requirements", setSecurityRequirements) - //router.GET("/models/:model-id/tags", getTags) - //router.PUT("/models/:model-id/tags", setTags) - - 
router.GET("/models/:model-id/data-assets", getDataAssets) - router.POST("/models/:model-id/data-assets", createNewDataAsset) - router.GET("/models/:model-id/data-assets/:data-asset-id", getDataAsset) - router.PUT("/models/:model-id/data-assets/:data-asset-id", setDataAsset) - router.DELETE("/models/:model-id/data-assets/:data-asset-id", deleteDataAsset) - - router.GET("/models/:model-id/trust-boundaries", getTrustBoundaries) - // router.POST("/models/:model-id/trust-boundaries", createNewTrustBoundary) - // router.GET("/models/:model-id/trust-boundaries/:trust-boundary-id", getTrustBoundary) - // router.PUT("/models/:model-id/trust-boundaries/:trust-boundary-id", setTrustBoundary) - // router.DELETE("/models/:model-id/trust-boundaries/:trust-boundary-id", deleteTrustBoundary) - - router.GET("/models/:model-id/shared-runtimes", getSharedRuntimes) - router.POST("/models/:model-id/shared-runtimes", createNewSharedRuntime) - router.GET("/models/:model-id/shared-runtimes/:shared-runtime-id", getSharedRuntime) - router.PUT("/models/:model-id/shared-runtimes/:shared-runtime-id", setSharedRuntime) - router.DELETE("/models/:model-id/shared-runtimes/:shared-runtime-id", deleteSharedRuntime) - - fmt.Println("Threagile server running...") - router.Run(":" + strconv.Itoa(*serverPort)) // listen and serve on 0.0.0.0:8080 or whatever port was specified -} - -func exampleFile(context *gin.Context) { - example, err := ioutil.ReadFile("/app/threagile-example-model.yaml") - checkErr(err) - context.Data(http.StatusOK, gin.MIMEYAML, example) -} - -func stubFile(context *gin.Context) { - stub, err := ioutil.ReadFile("/app/threagile-stub-model.yaml") - checkErr(err) - context.Data(http.StatusOK, gin.MIMEYAML, addSupportedTags(stub)) // TODO use also the MIMEYAML way of serving YAML in model export? 
-} - -func addSupportedTags(input []byte) []byte { - // add distinct tags as "tags_available" - supportedTags := make(map[string]bool, 0) - for _, customRule := range customRiskRules { - for _, tag := range customRule.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - } - for _, tag := range accidental_secret_leak.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range code_backdooring.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range container_baseimage_backdooring.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range container_platform_escape.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range cross_site_request_forgery.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range cross_site_scripting.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range dos_risky_access_across_trust_boundary.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range incomplete_model.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range ldap_injection.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_authentication.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_authentication_second_factor.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_build_infrastructure.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_cloud_hardening.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_file_validation.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_hardening.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - 
for _, tag := range missing_identity_propagation.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_identity_provider_isolation.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_identity_store.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_network_segmentation.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_vault.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_vault_isolation.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range missing_waf.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range mixed_targets_on_shared_runtime.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range path_traversal.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range push_instead_of_pull_deployment.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range search_query_injection.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range server_side_request_forgery.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range service_registry_poisoning.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range sql_nosql_injection.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unchecked_deployment.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unencrypted_asset.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unencrypted_communication.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unguarded_access_from_internet.SupportedTags() { - 
supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unguarded_direct_datastore_access.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unnecessary_communication_link.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unnecessary_data_asset.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unnecessary_data_transfer.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range unnecessary_technical_asset.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range untrusted_deserialization.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range wrong_communication_link_content.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range wrong_trust_boundary_content.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - for _, tag := range xml_external_entity.SupportedTags() { - supportedTags[strings.ToLower(tag)] = true - } - tags := make([]string, 0, len(supportedTags)) - for t := range supportedTags { - tags = append(tags, t) - } - if len(tags) == 0 { - return input - } - sort.Strings(tags) - if *verbose { - fmt.Print("Supported tags of all risk rules: ") - for i, tag := range tags { - if i > 0 { - fmt.Print(", ") - } - fmt.Print(tag) - } - fmt.Println() - } - replacement := "tags_available:" - for _, tag := range tags { - replacement += "\n - " + tag - } - return []byte(strings.Replace(string(input), "tags_available:", replacement, 1)) -} - -const keySize = 32 - -type timeoutStruct struct { - xorRand []byte - createdNanotime, lastAcessedNanotime int64 -} - -var mapTokenHashToTimeoutStruct = make(map[string]timeoutStruct) -var mapFolderNameToTokenHash = make(map[string]string) - -func createToken(context *gin.Context) { - folderName, key, ok := checkKeyToFolderName(context) - if !ok { - return - } - 
globalLock.Lock() - defer globalLock.Unlock() - if tokenHash, exists := mapFolderNameToTokenHash[folderName]; exists { - // invalidate previous token - delete(mapTokenHashToTimeoutStruct, tokenHash) - } - // create a strong random 256 bit value (used to xor) - xorBytesArr := make([]byte, keySize) - n, err := rand.Read(xorBytesArr[:]) - if n != keySize || err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to create token", - }) - return - } - now := time.Now().UnixNano() - token := xor(key, xorBytesArr) - tokenHash := hashSHA256(token) - housekeepingTokenMaps() - mapTokenHashToTimeoutStruct[tokenHash] = timeoutStruct{ - xorRand: xorBytesArr, - createdNanotime: now, - lastAcessedNanotime: now, - } - mapFolderNameToTokenHash[folderName] = tokenHash - context.JSON(http.StatusCreated, gin.H{ - "token": base64.RawURLEncoding.EncodeToString(token[:]), - }) -} - -func deleteToken(context *gin.Context) { - header := tokenHeader{} - if err := context.ShouldBindHeader(&header); err != nil { - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return - } - token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token)) - if len(token) == 0 || err != nil { - if err != nil { - log.Println(err) - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return - } - globalLock.Lock() - defer globalLock.Unlock() - deleteTokenHashFromMaps(hashSHA256(token)) - context.JSON(http.StatusOK, gin.H{ - "message": "token deleted", - }) -} - -const extremeShortTimeoutsForTesting = false - -func housekeepingTokenMaps() { - now := time.Now().UnixNano() - for tokenHash, val := range mapTokenHashToTimeoutStruct { - if extremeShortTimeoutsForTesting { - // remove all elements older than 1 minute (= 60000000000 ns) soft - // and all elements older than 3 minutes (= 180000000000 ns) hard - if now-val.lastAcessedNanotime > 60000000000 || now-val.createdNanotime > 180000000000 { 
- fmt.Println("About to remove a token hash from maps") - deleteTokenHashFromMaps(tokenHash) - } - } else { - // remove all elements older than 30 minutes (= 1800000000000 ns) soft - // and all elements older than 10 hours (= 36000000000000 ns) hard - if now-val.lastAcessedNanotime > 1800000000000 || now-val.createdNanotime > 36000000000000 { - deleteTokenHashFromMaps(tokenHash) - } - } - } -} - -func deleteTokenHashFromMaps(tokenHash string) { - delete(mapTokenHashToTimeoutStruct, tokenHash) - for folderName, check := range mapFolderNameToTokenHash { - if check == tokenHash { - delete(mapFolderNameToTokenHash, folderName) - break - } - } -} - -func xor(key []byte, xor []byte) []byte { - if len(key) != len(xor) { - panic(errors.New("key length not matching XOR length")) - } - result := make([]byte, len(xor)) - for i, b := range key { - result[i] = b ^ xor[i] - } - return result -} - -func analyzeModelOnServerDirectly(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer func() { - unlockFolder(folderNameOfKey) - var err error - if r := recover(); r != nil { - err = r.(error) - if *verbose { - log.Println(err) - } - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": strings.TrimSpace(err.Error()), - }) - ok = false - } - }() - - dpi, err := strconv.Atoi(context.DefaultQuery("dpi", strconv.Itoa(defaultGraphvizDPI))) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - - _, yamlText, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if !ok { - return - } - tmpModelFile, err := ioutil.TempFile(model.TempFolder, "threagile-direct-analyze-*") - if err != nil { - handleErrorInServiceCall(err, context) - return - } - defer os.Remove(tmpModelFile.Name()) - tmpOutputDir, err := ioutil.TempDir(model.TempFolder, "threagile-direct-analyze-") - if err != nil { - handleErrorInServiceCall(err, context) - return - } - 
defer os.RemoveAll(tmpOutputDir) - tmpResultFile, err := ioutil.TempFile(model.TempFolder, "threagile-result-*.zip") - checkErr(err) - defer os.Remove(tmpResultFile.Name()) - - err = ioutil.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400) - - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, true, true, true, true, true, true, true, true, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - err = ioutil.WriteFile(tmpOutputDir+"/threagile.yaml", []byte(yamlText), 0400) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - - files := []string{ - tmpOutputDir + "/threagile.yaml", - tmpOutputDir + "/" + dataFlowDiagramFilenamePNG, - tmpOutputDir + "/" + dataAssetDiagramFilenamePNG, - tmpOutputDir + "/" + reportFilename, - tmpOutputDir + "/" + excelRisksFilename, - tmpOutputDir + "/" + excelTagsFilename, - tmpOutputDir + "/" + jsonRisksFilename, - tmpOutputDir + "/" + jsonTechnicalAssetsFilename, - tmpOutputDir + "/" + jsonStatsFilename, - } - if keepDiagramSourceFiles { - files = append(files, tmpOutputDir+"/"+dataFlowDiagramFilenameDOT) - files = append(files, tmpOutputDir+"/"+dataAssetDiagramFilenameDOT) - } - err = zipFiles(tmpResultFile.Name(), files) - checkErr(err) - if *verbose { - fmt.Println("Streaming back result file: " + tmpResultFile.Name()) - } - context.FileAttachment(tmpResultFile.Name(), "threagile-result.zip") -} - -type responseType int - -const ( - dataFlowDiagram responseType = iota - dataAssetDiagram - reportPDF - risksExcel - tagsExcel - risksJSON - technicalAssetsJSON - statsJSON -) - -func streamDataFlowDiagram(context *gin.Context) { - streamResponse(context, dataFlowDiagram) -} -func streamDataAssetDiagram(context *gin.Context) { - streamResponse(context, dataAssetDiagram) -} -func streamReportPDF(context *gin.Context) { - streamResponse(context, reportPDF) -} -func streamRisksExcel(context 
*gin.Context) { - streamResponse(context, risksExcel) -} -func streamTagsExcel(context *gin.Context) { - streamResponse(context, tagsExcel) -} -func streamRisksJSON(context *gin.Context) { - streamResponse(context, risksJSON) -} -func streamTechnicalAssetsJSON(context *gin.Context) { - streamResponse(context, technicalAssetsJSON) -} -func streamStatsJSON(context *gin.Context) { - streamResponse(context, statsJSON) -} -func streamResponse(context *gin.Context, responseType responseType) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer func() { - unlockFolder(folderNameOfKey) - var err error - if r := recover(); r != nil { - err = r.(error) - if *verbose { - log.Println(err) - } - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": strings.TrimSpace(err.Error()), - }) - ok = false - } - }() - dpi, err := strconv.Atoi(context.DefaultQuery("dpi", strconv.Itoa(defaultGraphvizDPI))) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - _, yamlText, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if !ok { - return - } - tmpModelFile, err := ioutil.TempFile(model.TempFolder, "threagile-render-*") - if err != nil { - handleErrorInServiceCall(err, context) - return - } - defer os.Remove(tmpModelFile.Name()) - tmpOutputDir, err := ioutil.TempDir(model.TempFolder, "threagile-render-") - if err != nil { - handleErrorInServiceCall(err, context) - return - } - defer os.RemoveAll(tmpOutputDir) - err = ioutil.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400) - if responseType == dataFlowDiagram { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, true, false, false, false, false, false, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.File(tmpOutputDir + "/" + 
dataFlowDiagramFilenamePNG) - } else if responseType == dataAssetDiagram { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, true, false, false, false, false, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.File(tmpOutputDir + "/" + dataAssetDiagramFilenamePNG) - } else if responseType == reportPDF { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, true, false, false, false, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.FileAttachment(tmpOutputDir+"/"+reportFilename, reportFilename) - } else if responseType == risksExcel { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, true, false, false, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.FileAttachment(tmpOutputDir+"/"+excelRisksFilename, excelRisksFilename) - } else if responseType == tagsExcel { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, false, true, false, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.FileAttachment(tmpOutputDir+"/"+excelTagsFilename, excelTagsFilename) - } else if responseType == risksJSON { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, false, false, true, false, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - json, err := 
ioutil.ReadFile(tmpOutputDir + "/" + jsonRisksFilename) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.Data(http.StatusOK, "application/json", json) // stream directly with JSON content-type in response instead of file download - } else if responseType == technicalAssetsJSON { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, false, false, true, true, false, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - json, err := ioutil.ReadFile(tmpOutputDir + "/" + jsonTechnicalAssetsFilename) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.Data(http.StatusOK, "application/json", json) // stream directly with JSON content-type in response instead of file download - } else if responseType == statsJSON { - doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, *executeModelMacro, *raaPlugin, *riskRulesPlugins, *skipRiskRules, *ignoreOrphanedRiskTracking, false, false, false, false, false, false, false, true, dpi) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - json, err := ioutil.ReadFile(tmpOutputDir + "/" + jsonStatsFilename) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - context.Data(http.StatusOK, "application/json", json) // stream directly with JSON content-type in response instead of file download - } -} - -// fully replaces threagile.yaml in sub-folder given by UUID -func importModel(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - - uuid := context.Param("model-id") // UUID is syntactically validated in readModel+checkModelFolder (next line) via uuid.Parse(modelUUID) - _, _, ok = readModel(context, uuid, key, folderNameOfKey) - if ok { - // first analyze it simply by 
executing the full risk process (just discard the result) to ensure that everything would work - yamlContent, ok := execute(context, true) - if ok { - // if we're here, then no problem was raised, so ok to proceed - ok = writeModelYAML(context, string(yamlContent), key, folderNameForModel(folderNameOfKey, uuid), "Model Import", false) - if ok { - context.JSON(http.StatusCreated, gin.H{ - "message": "model imported", - }) - } - } - } -} - -func stats(context *gin.Context) { - keyCount, modelCount := 0, 0 - keyFolders, err := ioutil.ReadDir(baseFolder) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to collect stats", - }) - return - } - for _, keyFolder := range keyFolders { - if len(keyFolder.Name()) == 128 { // it's a sha512 token hash probably, so count it as token folder for the stats - keyCount++ - modelFolders, err := ioutil.ReadDir(baseFolder + "/" + keyFolder.Name()) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to collect stats", - }) - return - } - for _, modelFolder := range modelFolders { - if len(modelFolder.Name()) == 36 { // it's a uuid model folder probably, so count it as model folder for the stats - modelCount++ - } - } - } - } - // TODO collect and deliver more stats (old model count?) 
and health info - context.JSON(http.StatusOK, gin.H{ - "key_count": keyCount, - "model_count": modelCount, - "success_count": successCount, - "error_count": errorCount, - }) -} - -func getDataAsset(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, dataAsset := range modelInput.Data_assets { - if dataAsset.ID == context.Param("data-asset-id") { - context.JSON(http.StatusOK, gin.H{ - title: dataAsset, - }) - return - } - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "data asset not found", - }) - } -} - -func deleteDataAsset(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - referencesDeleted := false - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, dataAsset := range modelInput.Data_assets { - if dataAsset.ID == context.Param("data-asset-id") { - // also remove all usages of this data asset !! - for _, techAsset := range modelInput.Technical_assets { - if techAsset.Data_assets_processed != nil { - for i, parsedChangeCandidateAsset := range techAsset.Data_assets_processed { - referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) - if referencedAsset == dataAsset.ID { // apply the removal - referencesDeleted = true - // Remove the element at index i - // TODO needs more testing - copy(techAsset.Data_assets_processed[i:], techAsset.Data_assets_processed[i+1:]) // Shift a[i+1:] left one index. 
- techAsset.Data_assets_processed[len(techAsset.Data_assets_processed)-1] = "" // Erase last element (write zero value). - techAsset.Data_assets_processed = techAsset.Data_assets_processed[:len(techAsset.Data_assets_processed)-1] // Truncate slice. - } - } - } - if techAsset.Data_assets_stored != nil { - for i, parsedChangeCandidateAsset := range techAsset.Data_assets_stored { - referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) - if referencedAsset == dataAsset.ID { // apply the removal - referencesDeleted = true - // Remove the element at index i - // TODO needs more testing - copy(techAsset.Data_assets_stored[i:], techAsset.Data_assets_stored[i+1:]) // Shift a[i+1:] left one index. - techAsset.Data_assets_stored[len(techAsset.Data_assets_stored)-1] = "" // Erase last element (write zero value). - techAsset.Data_assets_stored = techAsset.Data_assets_stored[:len(techAsset.Data_assets_stored)-1] // Truncate slice. - } - } - } - if techAsset.Communication_links != nil { - for title, commLink := range techAsset.Communication_links { - for i, dataAssetSent := range commLink.Data_assets_sent { - referencedAsset := fmt.Sprintf("%v", dataAssetSent) - if referencedAsset == dataAsset.ID { // apply the removal - referencesDeleted = true - // Remove the element at index i - // TODO needs more testing - copy(techAsset.Communication_links[title].Data_assets_sent[i:], techAsset.Communication_links[title].Data_assets_sent[i+1:]) // Shift a[i+1:] left one index. - techAsset.Communication_links[title].Data_assets_sent[len(techAsset.Communication_links[title].Data_assets_sent)-1] = "" // Erase last element (write zero value). - x := techAsset.Communication_links[title] - x.Data_assets_sent = techAsset.Communication_links[title].Data_assets_sent[:len(techAsset.Communication_links[title].Data_assets_sent)-1] // Truncate slice. 
- techAsset.Communication_links[title] = x - } - } - for i, dataAssetReceived := range commLink.Data_assets_received { - referencedAsset := fmt.Sprintf("%v", dataAssetReceived) - if referencedAsset == dataAsset.ID { // apply the removal - referencesDeleted = true - // Remove the element at index i - // TODO needs more testing - copy(techAsset.Communication_links[title].Data_assets_received[i:], techAsset.Communication_links[title].Data_assets_received[i+1:]) // Shift a[i+1:] left one index. - techAsset.Communication_links[title].Data_assets_received[len(techAsset.Communication_links[title].Data_assets_received)-1] = "" // Erase last element (write zero value). - x := techAsset.Communication_links[title] - x.Data_assets_received = techAsset.Communication_links[title].Data_assets_received[:len(techAsset.Communication_links[title].Data_assets_received)-1] // Truncate slice. - techAsset.Communication_links[title] = x - } - } - } - } - } - for indivRiskCatTitle, indivRiskCat := range modelInput.Individual_risk_categories { - if indivRiskCat.Risks_identified != nil { - for indivRiskInstanceTitle, indivRiskInstance := range indivRiskCat.Risks_identified { - if indivRiskInstance.Most_relevant_data_asset == dataAsset.ID { // apply the removal - referencesDeleted = true - x := modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] - x.Most_relevant_data_asset = "" // TODO needs more testing - modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] = x - } - } - } - } - // remove it itself - delete(modelInput.Data_assets, title) - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Data Asset Deletion") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "data asset deleted", - "id": dataAsset.ID, - "references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well - }) - } - return - } - } - 
context.JSON(http.StatusNotFound, gin.H{ - "error": "data asset not found", - }) - } -} - -func setSharedRuntime(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, sharedRuntime := range modelInput.Shared_runtimes { - if sharedRuntime.ID == context.Param("shared-runtime-id") { - payload := payloadSharedRuntime{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - sharedRuntimeInput, ok := populateSharedRuntime(context, payload) - if !ok { - return - } - // in order to also update the title, remove the shared runtime from the map and re-insert it (with new key) - delete(modelInput.Shared_runtimes, title) - modelInput.Shared_runtimes[payload.Title] = sharedRuntimeInput - idChanged := sharedRuntimeInput.ID != sharedRuntime.ID - if idChanged { // ID-CHANGE-PROPAGATION - for indivRiskCatTitle, indivRiskCat := range modelInput.Individual_risk_categories { - if indivRiskCat.Risks_identified != nil { - for indivRiskInstanceTitle, indivRiskInstance := range indivRiskCat.Risks_identified { - if indivRiskInstance.Most_relevant_shared_runtime == sharedRuntime.ID { // apply the ID change - x := modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] - x.Most_relevant_shared_runtime = sharedRuntimeInput.ID // TODO needs more testing - modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] = x - } - } - } - } - } - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Shared Runtime Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - 
"message": "shared runtime updated", - "id": sharedRuntimeInput.ID, - "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded - }) - } - return - } - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "shared runtime not found", - }) - } -} - -func setDataAsset(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, dataAsset := range modelInput.Data_assets { - if dataAsset.ID == context.Param("data-asset-id") { - payload := payloadDataAsset{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - dataAssetInput, ok := populateDataAsset(context, payload) - if !ok { - return - } - // in order to also update the title, remove the asset from the map and re-insert it (with new key) - delete(modelInput.Data_assets, title) - modelInput.Data_assets[payload.Title] = dataAssetInput - idChanged := dataAssetInput.ID != dataAsset.ID - if idChanged { // ID-CHANGE-PROPAGATION - // also update all usages to point to the new (changed) ID !! 
- for techAssetTitle, techAsset := range modelInput.Technical_assets { - if techAsset.Data_assets_processed != nil { - for i, parsedChangeCandidateAsset := range techAsset.Data_assets_processed { - referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) - if referencedAsset == dataAsset.ID { // apply the ID change - modelInput.Technical_assets[techAssetTitle].Data_assets_processed[i] = dataAssetInput.ID - } - } - } - if techAsset.Data_assets_stored != nil { - for i, parsedChangeCandidateAsset := range techAsset.Data_assets_stored { - referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) - if referencedAsset == dataAsset.ID { // apply the ID change - modelInput.Technical_assets[techAssetTitle].Data_assets_stored[i] = dataAssetInput.ID - } - } - } - if techAsset.Communication_links != nil { - for title, commLink := range techAsset.Communication_links { - for i, dataAssetSent := range commLink.Data_assets_sent { - referencedAsset := fmt.Sprintf("%v", dataAssetSent) - if referencedAsset == dataAsset.ID { // apply the ID change - modelInput.Technical_assets[techAssetTitle].Communication_links[title].Data_assets_sent[i] = dataAssetInput.ID - } - } - for i, dataAssetReceived := range commLink.Data_assets_received { - referencedAsset := fmt.Sprintf("%v", dataAssetReceived) - if referencedAsset == dataAsset.ID { // apply the ID change - modelInput.Technical_assets[techAssetTitle].Communication_links[title].Data_assets_received[i] = dataAssetInput.ID - } - } - } - } - } - for indivRiskCatTitle, indivRiskCat := range modelInput.Individual_risk_categories { - if indivRiskCat.Risks_identified != nil { - for indivRiskInstanceTitle, indivRiskInstance := range indivRiskCat.Risks_identified { - if indivRiskInstance.Most_relevant_data_asset == dataAsset.ID { // apply the ID change - x := modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] - x.Most_relevant_data_asset = dataAssetInput.ID // TODO needs more testing - 
modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] = x - } - } - } - } - } - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Data Asset Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "data asset updated", - "id": dataAssetInput.ID, - "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded - }) - } - return - } - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "data asset not found", - }) - } -} - -func getSharedRuntime(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, sharedRuntime := range modelInput.Shared_runtimes { - if sharedRuntime.ID == context.Param("shared-runtime-id") { - context.JSON(http.StatusOK, gin.H{ - title: sharedRuntime, - }) - return - } - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "shared runtime not found", - }) - } -} - -func createNewSharedRuntime(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadSharedRuntime{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - // yes, here keyed by title in YAML for better readability in the YAML file itself - if _, exists := modelInput.Shared_runtimes[payload.Title]; exists { - context.JSON(http.StatusConflict, gin.H{ - 
"error": "shared runtime with this title already exists", - }) - return - } - // but later it will in memory keyed by it's "id", so do this uniqueness check also - for _, runtime := range modelInput.Shared_runtimes { - if runtime.ID == payload.Id { - context.JSON(http.StatusConflict, gin.H{ - "error": "shared runtime with this id already exists", - }) - return - } - } - if !checkTechnicalAssetsExisting(modelInput, payload.Technical_assets_running) { - context.JSON(http.StatusBadRequest, gin.H{ - "error": "referenced technical asset does not exist", - }) - return - } - sharedRuntimeInput, ok := populateSharedRuntime(context, payload) - if !ok { - return - } - if modelInput.Shared_runtimes == nil { - modelInput.Shared_runtimes = make(map[string]model.InputSharedRuntime) - } - modelInput.Shared_runtimes[payload.Title] = sharedRuntimeInput - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Shared Runtime Creation") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "shared runtime created", - "id": sharedRuntimeInput.ID, - }) - } - } -} - -func checkTechnicalAssetsExisting(modelInput model.ModelInput, techAssetIDs []string) (ok bool) { - for _, techAssetID := range techAssetIDs { - exists := false - for _, val := range modelInput.Technical_assets { - if val.ID == techAssetID { - exists = true - break - } - } - if !exists { - return false - } - } - return true -} - -func populateSharedRuntime(context *gin.Context, payload payloadSharedRuntime) (sharedRuntimeInput model.InputSharedRuntime, ok bool) { - sharedRuntimeInput = model.InputSharedRuntime{ - ID: payload.Id, - Description: payload.Description, - Tags: lowerCaseAndTrim(payload.Tags), - Technical_assets_running: payload.Technical_assets_running, - } - return sharedRuntimeInput, true -} - -func deleteSharedRuntime(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - 
modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - referencesDeleted := false - // yes, here keyed by title in YAML for better readability in the YAML file itself - for title, sharedRuntime := range modelInput.Shared_runtimes { - if sharedRuntime.ID == context.Param("shared-runtime-id") { - // also remove all usages of this shared runtime !! - for indivRiskCatTitle, indivRiskCat := range modelInput.Individual_risk_categories { - if indivRiskCat.Risks_identified != nil { - for indivRiskInstanceTitle, indivRiskInstance := range indivRiskCat.Risks_identified { - if indivRiskInstance.Most_relevant_shared_runtime == sharedRuntime.ID { // apply the removal - referencesDeleted = true - x := modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] - x.Most_relevant_shared_runtime = "" // TODO needs more testing - modelInput.Individual_risk_categories[indivRiskCatTitle].Risks_identified[indivRiskInstanceTitle] = x - } - } - } - } - // remove it itself - delete(modelInput.Shared_runtimes, title) - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Shared Runtime Deletion") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "shared runtime deleted", - "id": sharedRuntime.ID, - "references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well - }) - } - return - } - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "shared runtime not found", - }) - } -} - -func createNewDataAsset(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadDataAsset{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ 
- "error": "unable to parse request payload", - }) - return - } - // yes, here keyed by title in YAML for better readability in the YAML file itself - if _, exists := modelInput.Data_assets[payload.Title]; exists { - context.JSON(http.StatusConflict, gin.H{ - "error": "data asset with this title already exists", - }) - return - } - // but later it will in memory keyed by it's "id", so do this uniqueness check also - for _, asset := range modelInput.Data_assets { - if asset.ID == payload.Id { - context.JSON(http.StatusConflict, gin.H{ - "error": "data asset with this id already exists", - }) - return - } - } - dataAssetInput, ok := populateDataAsset(context, payload) - if !ok { - return - } - if modelInput.Data_assets == nil { - modelInput.Data_assets = make(map[string]model.InputDataAsset) - } - modelInput.Data_assets[payload.Title] = dataAssetInput - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Data Asset Creation") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "data asset created", - "id": dataAssetInput.ID, - }) - } - } -} - -func populateDataAsset(context *gin.Context, payload payloadDataAsset) (dataAssetInput model.InputDataAsset, ok bool) { - usage, err := model.ParseUsage(payload.Usage) - if err != nil { - handleErrorInServiceCall(err, context) - return dataAssetInput, false - } - quantity, err := model.ParseQuantity(payload.Quantity) - if err != nil { - handleErrorInServiceCall(err, context) - return dataAssetInput, false - } - confidentiality, err := model.ParseConfidentiality(payload.Confidentiality) - if err != nil { - handleErrorInServiceCall(err, context) - return dataAssetInput, false - } - integrity, err := model.ParseCriticality(payload.Integrity) - if err != nil { - handleErrorInServiceCall(err, context) - return dataAssetInput, false - } - availability, err := model.ParseCriticality(payload.Availability) - if err != nil { - handleErrorInServiceCall(err, context) - return dataAssetInput, false - } - dataAssetInput = 
model.InputDataAsset{ - ID: payload.Id, - Description: payload.Description, - Usage: usage.String(), - Tags: lowerCaseAndTrim(payload.Tags), - Origin: payload.Origin, - Owner: payload.Owner, - Quantity: quantity.String(), - Confidentiality: confidentiality.String(), - Integrity: integrity.String(), - Availability: availability.String(), - Justification_cia_rating: payload.Justification_cia_rating, - } - return dataAssetInput, true -} - -func getDataAssets(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, model.Data_assets) - } -} - -func getTrustBoundaries(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, model.Trust_boundaries) - } -} - -func getSharedRuntimes(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, model.Shared_runtimes) - } -} - -func arrayOfStringValues(values []model.TypeEnum) []string { - result := make([]string, 0) - for _, value := range values { - result = append(result, value.String()) - } - return result -} - -func getModel(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - _, yamlText, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - 
tmpResultFile, err := ioutil.TempFile(model.TempFolder, "threagile-*.yaml") - checkErr(err) - err = ioutil.WriteFile(tmpResultFile.Name(), []byte(yamlText), 0400) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to stream model file", - }) - return - } - defer os.Remove(tmpResultFile.Name()) - context.FileAttachment(tmpResultFile.Name(), "threagile.yaml") - } -} - -type payloadModels struct { - ID string `json:"id"` - Title string `json:"title"` - Timestamp_created time.Time `json:"timestamp_created"` - Timestamp_modified time.Time `json:"timestamp_modified"` -} - -type payloadCover struct { - Title string `json:"title"` - Date time.Time `json:"date"` - Author model.Author `json:"author"` -} - -type payloadOverview struct { - Management_summary_comment string `json:"management_summary_comment"` - Business_criticality string `json:"business_criticality"` - Business_overview model.Overview `json:"business_overview"` - Technical_overview model.Overview `json:"technical_overview"` -} - -type payloadAbuseCases map[string]string - -type payloadSecurityRequirements map[string]string - -type payloadDataAsset struct { - Title string `json:"title"` - Id string `json:"id"` - Description string `json:"description"` - Usage string `json:"usage"` - Tags []string `json:"tags"` - Origin string `json:"origin"` - Owner string `json:"owner"` - Quantity string `json:"quantity"` - Confidentiality string `json:"confidentiality"` - Integrity string `json:"integrity"` - Availability string `json:"availability"` - Justification_cia_rating string `json:"justification_cia_rating"` -} - -type payloadSharedRuntime struct { - Title string `json:"title"` - Id string `json:"id"` - Description string `json:"description"` - Tags []string `json:"tags"` - Technical_assets_running []string `json:"technical_assets_running"` -} - -func setSecurityRequirements(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) 
- if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadSecurityRequirements{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - modelInput.Security_requirements = payload - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Security Requirements Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "model updated", - }) - } - } -} - -func getSecurityRequirements(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, model.Security_requirements) - } -} - -func setAbuseCases(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadAbuseCases{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - modelInput.Abuse_cases = payload - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Abuse Cases Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "model updated", - }) - } - } -} - -func getAbuseCases(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, 
context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, model.Abuse_cases) - } -} - -func setOverview(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadOverview{} - err := context.BindJSON(&payload) - if err != nil { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - criticality, err := model.ParseCriticality(payload.Business_criticality) - if err != nil { - handleErrorInServiceCall(err, context) - return - } - modelInput.Management_summary_comment = payload.Management_summary_comment - modelInput.Business_criticality = criticality.String() - modelInput.Business_overview.Description = payload.Business_overview.Description - modelInput.Business_overview.Images = payload.Business_overview.Images - modelInput.Technical_overview.Description = payload.Technical_overview.Description - modelInput.Technical_overview.Images = payload.Technical_overview.Images - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Overview Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "model updated", - }) - } - } -} - -func handleErrorInServiceCall(err error, context *gin.Context) { - log.Println(err) - context.JSON(http.StatusBadRequest, gin.H{ - "error": strings.TrimSpace(err.Error()), - }) -} - -func getOverview(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, gin.H{ - "management_summary_comment": model.Management_summary_comment, - 
"business_criticality": model.Business_criticality, - "business_overview": model.Business_overview, - "technical_overview": model.Technical_overview, - }) - } -} - -func setCover(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - modelInput, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - payload := payloadCover{} - err := context.BindJSON(&payload) - if err != nil { - context.JSON(http.StatusBadRequest, gin.H{ - "error": "unable to parse request payload", - }) - return - } - modelInput.Title = payload.Title - if !payload.Date.IsZero() { - modelInput.Date = payload.Date.Format("2006-01-02") - } - modelInput.Author.Name = payload.Author.Name - modelInput.Author.Homepage = payload.Author.Homepage - ok = writeModel(context, key, folderNameOfKey, &modelInput, "Cover Update") - if ok { - context.JSON(http.StatusOK, gin.H{ - "message": "model updated", - }) - } - } -} - -func getCover(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - model, _, ok := readModel(context, context.Param("model-id"), key, folderNameOfKey) - if ok { - context.JSON(http.StatusOK, gin.H{ - "title": model.Title, - "date": model.Date, - "author": model.Author, - }) - } -} - -// creates a sub-folder (named by a new UUID) inside the token folder -func createNewModel(context *gin.Context) { - folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - ok = checkObjectCreationThrottler(context, "MODEL") - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - - uuid := uuid.New().String() - err := os.Mkdir(folderNameForModel(folderNameOfKey, uuid), 0700) - if err != nil { - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to 
create model", - }) - return - } - - yaml := `title: New Threat Model -threagile_version: ` + model.ThreagileVersion + ` -author: - name: "" - homepage: "" -date: -business_overview: - description: "" - images: [] -technical_overview: - description: "" - images: [] -business_criticality: "" -management_summary_comment: "" -questions: {} -abuse_cases: {} -security_requirements: {} -tags_available: [] -data_assets: {} -technical_assets: {} -trust_boundaries: {} -shared_runtimes: {} -individual_risk_categories: {} -risk_tracking: {} -diagram_tweak_nodesep: "" -diagram_tweak_ranksep: "" -diagram_tweak_edge_layout: "" -diagram_tweak_suppress_edge_labels: false -diagram_tweak_invisible_connections_between_assets: [] -diagram_tweak_same_rank_assets: []` - - ok = writeModelYAML(context, yaml, key, folderNameForModel(folderNameOfKey, uuid), "New Model Creation", true) - if ok { - context.JSON(http.StatusCreated, gin.H{ - "message": "model created", - "id": uuid, - }) - } -} - -func listModels(context *gin.Context) { // TODO currently returns error when any model is no longer valid in syntax, so eventually have some fallback to not just bark on an invalid model... 
- folderNameOfKey, key, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - - result := make([]payloadModels, 0) - modelFolders, err := ioutil.ReadDir(folderNameOfKey) - if err != nil { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return - } - for _, fileInfo := range modelFolders { - if fileInfo.IsDir() { - modelStat, err := os.Stat(folderNameOfKey + "/" + fileInfo.Name() + "/threagile.yaml") - if err != nil { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "unable to list model", - }) - return - } - model, _, ok := readModel(context, fileInfo.Name(), key, folderNameOfKey) - if !ok { - return - } - result = append(result, payloadModels{ - ID: fileInfo.Name(), - Title: model.Title, - Timestamp_created: fileInfo.ModTime(), - Timestamp_modified: modelStat.ModTime(), - }) - } - } - context.JSON(http.StatusOK, result) -} - -func deleteModel(context *gin.Context) { - folderNameOfKey, _, ok := checkTokenToFolderName(context) - if !ok { - return - } - lockFolder(folderNameOfKey) - defer unlockFolder(folderNameOfKey) - folder, ok := checkModelFolder(context, context.Param("model-id"), folderNameOfKey) - if ok { - err := os.RemoveAll(folder) - if err != nil { - context.JSON(http.StatusNotFound, gin.H{ - "error": "model not found", - }) - } - context.JSON(http.StatusOK, gin.H{ - "message": "model deleted", - }) - } -} - -func checkModelFolder(context *gin.Context, modelUUID string, folderNameOfKey string) (modelFolder string, ok bool) { - uuidParsed, err := uuid.Parse(modelUUID) - if err != nil { - context.JSON(http.StatusNotFound, gin.H{ - "error": "model not found", - }) - return modelFolder, false - } - modelFolder = folderNameForModel(folderNameOfKey, uuidParsed.String()) - if _, err := os.Stat(modelFolder); os.IsNotExist(err) { - context.JSON(http.StatusNotFound, gin.H{ - "error": "model not found", - }) - return 
modelFolder, false - } - return modelFolder, true -} - -func readModel(context *gin.Context, modelUUID string, key []byte, folderNameOfKey string) (modelInputResult model.ModelInput, yamlText string, ok bool) { - modelFolder, ok := checkModelFolder(context, modelUUID, folderNameOfKey) - if !ok { - return modelInputResult, yamlText, false - } - cryptoKey := generateKeyFromAlreadyStrongRandomInput(key) - block, err := aes.NewCipher(cryptoKey) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - aesgcm, err := cipher.NewGCM(block) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - - fileBytes, err := ioutil.ReadFile(modelFolder + "/threagile.yaml") - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - - nonce := fileBytes[0:12] - ciphertext := fileBytes[12:] - plaintext, err := aesgcm.Open(nil, nonce, ciphertext, nil) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - - r, err := gzip.NewReader(bytes.NewReader(plaintext)) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - buf := new(bytes.Buffer) - buf.ReadFrom(r) - modelInput := model.ModelInput{} - yamlBytes := buf.Bytes() - err = yaml.Unmarshal(yamlBytes, &modelInput) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to open model", - }) - return modelInputResult, yamlText, false - } - return modelInput, string(yamlBytes), true 
-} - -func writeModel(context *gin.Context, key []byte, folderNameOfKey string, modelInput *model.ModelInput, changeReasonForHistory string) (ok bool) { - modelFolder, ok := checkModelFolder(context, context.Param("model-id"), folderNameOfKey) - if ok { - modelInput.Threagile_version = model.ThreagileVersion - yamlBytes, err := yaml.Marshal(modelInput) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - /* - yamlBytes = model.ReformatYAML(yamlBytes) - */ - return writeModelYAML(context, string(yamlBytes), key, modelFolder, changeReasonForHistory, false) - } - return false -} - -func writeModelYAML(context *gin.Context, yaml string, key []byte, modelFolder string, changeReasonForHistory string, skipBackup bool) (ok bool) { - if *verbose { - fmt.Println("about to write " + strconv.Itoa(len(yaml)) + " bytes of yaml into model folder: " + modelFolder) - } - var b bytes.Buffer - w := gzip.NewWriter(&b) - w.Write([]byte(yaml)) - w.Close() - plaintext := b.Bytes() - cryptoKey := generateKeyFromAlreadyStrongRandomInput(key) - block, err := aes.NewCipher(cryptoKey) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - // Never use more than 2^32 random nonces with a given key because of the risk of a repeat. 
- nonce := make([]byte, 12) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - aesgcm, err := cipher.NewGCM(block) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - ciphertext := aesgcm.Seal(nil, nonce, plaintext, nil) - if !skipBackup { - err = backupModelToHistory(modelFolder, changeReasonForHistory) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - } - f, err := os.Create(modelFolder + "/threagile.yaml") - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to write model", - }) - return false - } - f.Write(nonce) - f.Write(ciphertext) - f.Close() - return true -} - -func backupModelToHistory(modelFolder string, changeReasonForHistory string) (err error) { - historyFolder := modelFolder + "/history" - if _, err := os.Stat(historyFolder); os.IsNotExist(err) { - err = os.Mkdir(historyFolder, 0700) - if err != nil { - return err - } - } - input, err := ioutil.ReadFile(modelFolder + "/threagile.yaml") - if err != nil { - return err - } - historyFile := historyFolder + "/" + time.Now().Format("2006-01-02 15:04:05") + " " + changeReasonForHistory + ".backup" - err = ioutil.WriteFile(historyFile, input, 0400) - if err != nil { - return err - } - // now delete any old files if over limit to keep - files, err := ioutil.ReadDir(historyFolder) - if err != nil { - return err - } - if len(files) > backupHistoryFilesToKeep { - requiredToDelete := len(files) - backupHistoryFilesToKeep - sort.Slice(files, func(i, j int) bool { - return files[i].Name() < files[j].Name() - }) - for _, file := range files { - requiredToDelete-- - err = os.Remove(historyFolder + "/" + file.Name()) - 
if err != nil { - return err - } - if requiredToDelete <= 0 { - break - } - } - } - return -} - -type argon2Params struct { - memory uint32 - iterations uint32 - parallelism uint8 - saltLength uint32 - keyLength uint32 -} - -func generateKeyFromAlreadyStrongRandomInput(alreadyRandomInput []byte) []byte { - // Establish the parameters to use for Argon2. - p := &argon2Params{ - memory: 64 * 1024, - iterations: 3, - parallelism: 2, - saltLength: 16, - keyLength: keySize, - } - // As the input is already cryptographically secure random, the salt is simply the first n bytes - salt := alreadyRandomInput[0:p.saltLength] - hash := argon2.IDKey(alreadyRandomInput[p.saltLength:], salt, p.iterations, p.memory, p.parallelism, p.keyLength) - return hash -} - -func folderNameForModel(folderNameOfKey string, uuid string) string { - return folderNameOfKey + "/" + uuid -} - -var throttlerLock sync.Mutex -var createdObjectsThrottler = make(map[string][]int64) - -func checkObjectCreationThrottler(context *gin.Context, typeName string) bool { - throttlerLock.Lock() - defer throttlerLock.Unlock() - - // remove all elements older than 3 minutes (= 180000000000 ns) - now := time.Now().UnixNano() - cutoff := now - 180000000000 - for keyCheck, _ := range createdObjectsThrottler { - for i := 0; i < len(createdObjectsThrottler[keyCheck]); i++ { - if createdObjectsThrottler[keyCheck][i] < cutoff { - // Remove the element at index i from slice (safe while looping using i as iterator) - createdObjectsThrottler[keyCheck] = append(createdObjectsThrottler[keyCheck][:i], createdObjectsThrottler[keyCheck][i+1:]...) 
- i-- // Since we just deleted a[i], we must redo that index - } - } - length := len(createdObjectsThrottler[keyCheck]) - if length == 0 { - delete(createdObjectsThrottler, keyCheck) - } - /* - if *verbose { - log.Println("Throttling count: "+strconv.Itoa(length)) - } - */ - } - - // check current request - keyHash := hash(typeName) // getting the real client ip is not easy inside fully encapsulated containerized runtime - if _, ok := createdObjectsThrottler[keyHash]; !ok { - createdObjectsThrottler[keyHash] = make([]int64, 0) - } - // check the limit of 20 creations for this type per 3 minutes - withinLimit := len(createdObjectsThrottler[keyHash]) < 20 - if withinLimit { - createdObjectsThrottler[keyHash] = append(createdObjectsThrottler[keyHash], now) - return true - } - context.JSON(http.StatusTooManyRequests, gin.H{ - "error": "object creation throttling exceeded (denial-of-service protection): please wait some time and try again", - }) - return false -} - -var locksByFolderName = make(map[string]*sync.Mutex) - -func lockFolder(folderName string) { - globalLock.Lock() - defer globalLock.Unlock() - _, exists := locksByFolderName[folderName] - if !exists { - locksByFolderName[folderName] = &sync.Mutex{} - } - locksByFolderName[folderName].Lock() -} - -func unlockFolder(folderName string) { - if _, exists := locksByFolderName[folderName]; exists { - locksByFolderName[folderName].Unlock() - delete(locksByFolderName, folderName) - } -} - -type tokenHeader struct { - Token string `header:"token"` -} -type keyHeader struct { - Key string `header:"key"` -} - -func folderNameFromKey(key []byte) string { - sha512Hash := hashSHA256(key) - return baseFolder + "/" + sha512Hash -} - -func hashSHA256(key []byte) string { - hasher := sha512.New() - hasher.Write(key) - return hex.EncodeToString(hasher.Sum(nil)) -} - -func createKey(context *gin.Context) { - ok := checkObjectCreationThrottler(context, "KEY") - if !ok { - return - } - globalLock.Lock() - defer globalLock.Unlock() 
- - keyBytesArr := make([]byte, keySize) - n, err := rand.Read(keyBytesArr[:]) - if n != keySize || err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to create key", - }) - return - } - err = os.Mkdir(folderNameFromKey(keyBytesArr), 0700) - if err != nil { - log.Println(err) - context.JSON(http.StatusInternalServerError, gin.H{ - "error": "unable to create key", - }) - return - } - context.JSON(http.StatusCreated, gin.H{ - "key": base64.RawURLEncoding.EncodeToString(keyBytesArr[:]), - }) -} - -func checkTokenToFolderName(context *gin.Context) (folderNameOfKey string, key []byte, ok bool) { - header := tokenHeader{} - if err := context.ShouldBindHeader(&header); err != nil { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return folderNameOfKey, key, false - } - token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token)) - if len(token) == 0 || err != nil { - if err != nil { - log.Println(err) - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return folderNameOfKey, key, false - } - globalLock.Lock() - defer globalLock.Unlock() - housekeepingTokenMaps() // to remove timed-out ones - tokenHash := hashSHA256(token) - if timeoutStruct, exists := mapTokenHashToTimeoutStruct[tokenHash]; exists { - // re-create the key from token - key := xor(token, timeoutStruct.xorRand) - folderNameOfKey := folderNameFromKey(key) - if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return folderNameOfKey, key, false - } - timeoutStruct.lastAcessedNanotime = time.Now().UnixNano() - return folderNameOfKey, key, true - } else { - context.JSON(http.StatusNotFound, gin.H{ - "error": "token not found", - }) - return folderNameOfKey, key, false - } -} - -func checkKeyToFolderName(context *gin.Context) (folderNameOfKey string, 
key []byte, ok bool) { - header := keyHeader{} - if err := context.ShouldBindHeader(&header); err != nil { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "key not found", - }) - return folderNameOfKey, key, false - } - key, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Key)) - if len(key) == 0 || err != nil { - if err != nil { - log.Println(err) - } - context.JSON(http.StatusNotFound, gin.H{ - "error": "key not found", - }) - return folderNameOfKey, key, false - } - folderNameOfKey = folderNameFromKey(key) - if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) { - log.Println(err) - context.JSON(http.StatusNotFound, gin.H{ - "error": "key not found", - }) - return folderNameOfKey, key, false - } - return folderNameOfKey, key, true -} - -func deleteKey(context *gin.Context) { - folderName, _, ok := checkKeyToFolderName(context) - if !ok { - return - } - globalLock.Lock() - defer globalLock.Unlock() - err := os.RemoveAll(folderName) - if err != nil { - log.Println("error during key delete: " + err.Error()) - context.JSON(http.StatusNotFound, gin.H{ - "error": "key not found", - }) - return - } - context.JSON(http.StatusOK, gin.H{ - "message": "key deleted", - }) -} - -func parseCommandlineArgs() { - modelFilename = flag.String("model", "threagile.yaml", "input model yaml file") - outputDir = flag.String("output", ".", "output directory") - raaPlugin = flag.String("raa-plugin", "raa.so", "RAA calculation plugin (.so shared object) file name") - executeModelMacro = flag.String("execute-model-macro", "", "Execute model macro (by ID)") - createExampleModel = flag.Bool("create-example-model", false, "just create an example model named threagile-example-model.yaml in the output directory") - createStubModel = flag.Bool("create-stub-model", false, "just create a minimal stub model named threagile-stub-model.yaml in the output directory") - createEditingSupport = flag.Bool("create-editing-support", false, "just create some 
editing support stuff in the output directory") - serverPort = flag.Int("server", 0, "start a server (instead of commandline execution) on the given port") - templateFilename = flag.String("background", "background.pdf", "background pdf file") - generateDataFlowDiagram = flag.Bool("generate-data-flow-diagram", true, "generate data-flow diagram") - generateDataAssetDiagram = flag.Bool("generate-data-asset-diagram", true, "generate data asset diagram") - generateRisksJSON = flag.Bool("generate-risks-json", true, "generate risks json") - generateTechnicalAssetsJSON = flag.Bool("generate-technical-assets-json", true, "generate technical assets json") - generateStatsJSON = flag.Bool("generate-stats-json", true, "generate stats json") - generateRisksExcel = flag.Bool("generate-risks-excel", true, "generate risks excel") - generateTagsExcel = flag.Bool("generate-tags-excel", true, "generate tags excel") - generateReportPDF = flag.Bool("generate-report-pdf", true, "generate report pdf, including diagrams") - diagramDPI = flag.Int("diagram-dpi", defaultGraphvizDPI, "DPI used to render: maximum is "+strconv.Itoa(maxGraphvizDPI)+"") - skipRiskRules = flag.String("skip-risk-rules", "", "comma-separated list of risk rules (by their ID) to skip") - riskRulesPlugins = flag.String("custom-risk-rules-plugins", "", "comma-separated list of plugins (.so shared object) file names with custom risk rules to load") - verbose = flag.Bool("verbose", false, "verbose output") - ignoreOrphanedRiskTracking = flag.Bool("ignore-orphaned-risk-tracking", false, "ignore orphaned risk tracking (just log them) not matching a concrete risk") - version := flag.Bool("version", false, "print version") - listTypes := flag.Bool("list-types", false, "print type information (enum values to be used in models)") - listRiskRules := flag.Bool("list-risk-rules", false, "print risk rules") - listModelMacros := flag.Bool("list-model-macros", false, "print model macros") - explainTypes := flag.Bool("explain-types", 
false, "Detailed explanation of all the types") - explainRiskRules := flag.Bool("explain-risk-rules", false, "Detailed explanation of all the risk rules") - explainModelMacros := flag.Bool("explain-model-macros", false, "Detailed explanation of all the model macros") - print3rdParty := flag.Bool("print-3rd-party-licenses", false, "print 3rd-party license information") - license := flag.Bool("print-license", false, "print license information") - flag.Usage = func() { - printLogo() - fmt.Fprintf(os.Stderr, "Usage: threagile [options]") - fmt.Println() - fmt.Println() - fmt.Println() - fmt.Println("Options:") - fmt.Println() - flag.PrintDefaults() - fmt.Println() - fmt.Println() - fmt.Println("Examples:") - fmt.Println() - fmt.Println("If you want to create an example model (via docker) as a starting point to learn about Threagile just run: ") - fmt.Println(" docker run --rm -it " + - "-v \"$(pwd)\":/app/work " + - "threagile/threagile " + - "-create-example-model " + - "-output /app/work") - fmt.Println() - fmt.Println("If you want to create a minimal stub model (via docker) as a starting point for your own model just run: ") - fmt.Println(" docker run --rm -it " + - "-v \"$(pwd)\":/app/work " + - "threagile/threagile " + - "-create-stub-model " + - "-output /app/work") - fmt.Println() - printExamples() - fmt.Println() - } - flag.Parse() - if *diagramDPI < 20 { - *diagramDPI = 20 - } else if *diagramDPI > maxGraphvizDPI { - *diagramDPI = 300 - } - if *version { - printLogo() - os.Exit(0) - } - if *listTypes { - printLogo() - fmt.Println("The following types are available (can be extended for custom rules):") - fmt.Println() - printTypes("Authentication", model.AuthenticationValues()) - fmt.Println() - printTypes("Authorization", model.AuthorizationValues()) - fmt.Println() - printTypes("Confidentiality", model.ConfidentialityValues()) - fmt.Println() - printTypes("Criticality (for integrity and availability)", model.CriticalityValues()) - fmt.Println() - 
printTypes("Data Breach Probability", model.DataBreachProbabilityValues()) - fmt.Println() - printTypes("Data Format", model.DataFormatValues()) - fmt.Println() - printTypes("Encryption", model.EncryptionStyleValues()) - fmt.Println() - printTypes("Protocol", model.ProtocolValues()) - fmt.Println() - printTypes("Quantity", model.QuantityValues()) - fmt.Println() - printTypes("Risk Exploitation Impact", model.RiskExploitationImpactValues()) - fmt.Println() - printTypes("Risk Exploitation Likelihood", model.RiskExploitationLikelihoodValues()) - fmt.Println() - printTypes("Risk Function", model.RiskFunctionValues()) - fmt.Println() - printTypes("Risk Severity", model.RiskSeverityValues()) - fmt.Println() - printTypes("Risk Status", model.RiskStatusValues()) - fmt.Println() - printTypes("STRIDE", model.STRIDEValues()) - fmt.Println() - printTypes("Technical Asset Machine", model.TechnicalAssetMachineValues()) - fmt.Println() - printTypes("Technical Asset Size", model.TechnicalAssetSizeValues()) - fmt.Println() - printTypes("Technical Asset Technology", model.TechnicalAssetTechnologyValues()) - fmt.Println() - printTypes("Technical Asset Type", model.TechnicalAssetTypeValues()) - fmt.Println() - printTypes("Trust Boundary Type", model.TrustBoundaryTypeValues()) - fmt.Println() - printTypes("Usage", model.UsageValues()) - fmt.Println() - os.Exit(0) - } - if *listModelMacros { - printLogo() - fmt.Println("The following model macros are available (can be extended via custom model macros):") - fmt.Println() - /* TODO finish plugin stuff - fmt.Println("Custom model macros:") - for id, customModelMacro := range customModelMacros { - fmt.Println(id, "-->", customModelMacro.GetMacroDetails().Title) - } - fmt.Println() - */ - fmt.Println("----------------------") - fmt.Println("Built-in model macros:") - fmt.Println("----------------------") - fmt.Println(add_build_pipeline.GetMacroDetails().ID, "-->", add_build_pipeline.GetMacroDetails().Title) - 
fmt.Println(add_vault.GetMacroDetails().ID, "-->", add_vault.GetMacroDetails().Title) - fmt.Println(pretty_print.GetMacroDetails().ID, "-->", pretty_print.GetMacroDetails().Title) - fmt.Println(remove_unused_tags.GetMacroDetails().ID, "-->", remove_unused_tags.GetMacroDetails().Title) - fmt.Println(seed_risk_tracking.GetMacroDetails().ID, "-->", seed_risk_tracking.GetMacroDetails().Title) - fmt.Println(seed_tags.GetMacroDetails().ID, "-->", seed_tags.GetMacroDetails().Title) - fmt.Println() - os.Exit(0) - } - if *listRiskRules { - printLogo() - fmt.Println("The following risk rules are available (can be extended via custom risk rules):") - fmt.Println() - fmt.Println("------------------") - fmt.Println("Custom risk rules:") - fmt.Println("------------------") - loadCustomRiskRules() - for id, customRule := range customRiskRules { - fmt.Println(id, "-->", customRule.Category().Title, "--> with tags:", customRule.SupportedTags()) - } - fmt.Println() - fmt.Println("--------------------") - fmt.Println("Built-in risk rules:") - fmt.Println("--------------------") - fmt.Println(accidental_secret_leak.Category().Id, "-->", accidental_secret_leak.Category().Title, "--> with tags:", accidental_secret_leak.SupportedTags()) - fmt.Println(code_backdooring.Category().Id, "-->", code_backdooring.Category().Title, "--> with tags:", code_backdooring.SupportedTags()) - fmt.Println(container_baseimage_backdooring.Category().Id, "-->", container_baseimage_backdooring.Category().Title, "--> with tags:", container_baseimage_backdooring.SupportedTags()) - fmt.Println(container_platform_escape.Category().Id, "-->", container_platform_escape.Category().Title, "--> with tags:", container_platform_escape.SupportedTags()) - fmt.Println(cross_site_request_forgery.Category().Id, "-->", cross_site_request_forgery.Category().Title, "--> with tags:", cross_site_request_forgery.SupportedTags()) - fmt.Println(cross_site_scripting.Category().Id, "-->", cross_site_scripting.Category().Title, "--> 
with tags:", cross_site_scripting.SupportedTags()) - fmt.Println(dos_risky_access_across_trust_boundary.Category().Id, "-->", dos_risky_access_across_trust_boundary.Category().Title, "--> with tags:", dos_risky_access_across_trust_boundary.SupportedTags()) - fmt.Println(incomplete_model.Category().Id, "-->", incomplete_model.Category().Title, "--> with tags:", incomplete_model.SupportedTags()) - fmt.Println(ldap_injection.Category().Id, "-->", ldap_injection.Category().Title, "--> with tags:", ldap_injection.SupportedTags()) - fmt.Println(missing_authentication.Category().Id, "-->", missing_authentication.Category().Title, "--> with tags:", missing_authentication.SupportedTags()) - fmt.Println(missing_authentication_second_factor.Category().Id, "-->", missing_authentication_second_factor.Category().Title, "--> with tags:", missing_authentication_second_factor.SupportedTags()) - fmt.Println(missing_build_infrastructure.Category().Id, "-->", missing_build_infrastructure.Category().Title, "--> with tags:", missing_build_infrastructure.SupportedTags()) - fmt.Println(missing_cloud_hardening.Category().Id, "-->", missing_cloud_hardening.Category().Title, "--> with tags:", missing_cloud_hardening.SupportedTags()) - fmt.Println(missing_file_validation.Category().Id, "-->", missing_file_validation.Category().Title, "--> with tags:", missing_file_validation.SupportedTags()) - fmt.Println(missing_hardening.Category().Id, "-->", missing_hardening.Category().Title, "--> with tags:", missing_hardening.SupportedTags()) - fmt.Println(missing_identity_propagation.Category().Id, "-->", missing_identity_propagation.Category().Title, "--> with tags:", missing_identity_propagation.SupportedTags()) - fmt.Println(missing_identity_provider_isolation.Category().Id, "-->", missing_identity_provider_isolation.Category().Title, "--> with tags:", missing_identity_provider_isolation.SupportedTags()) - fmt.Println(missing_identity_store.Category().Id, "-->", 
missing_identity_store.Category().Title, "--> with tags:", missing_identity_store.SupportedTags()) - fmt.Println(missing_network_segmentation.Category().Id, "-->", missing_network_segmentation.Category().Title, "--> with tags:", missing_network_segmentation.SupportedTags()) - fmt.Println(missing_vault.Category().Id, "-->", missing_vault.Category().Title, "--> with tags:", missing_vault.SupportedTags()) - fmt.Println(missing_vault_isolation.Category().Id, "-->", missing_vault_isolation.Category().Title, "--> with tags:", missing_vault_isolation.SupportedTags()) - fmt.Println(missing_waf.Category().Id, "-->", missing_waf.Category().Title, "--> with tags:", missing_waf.SupportedTags()) - fmt.Println(mixed_targets_on_shared_runtime.Category().Id, "-->", mixed_targets_on_shared_runtime.Category().Title, "--> with tags:", mixed_targets_on_shared_runtime.SupportedTags()) - fmt.Println(path_traversal.Category().Id, "-->", path_traversal.Category().Title, "--> with tags:", path_traversal.SupportedTags()) - fmt.Println(push_instead_of_pull_deployment.Category().Id, "-->", push_instead_of_pull_deployment.Category().Title, "--> with tags:", push_instead_of_pull_deployment.SupportedTags()) - fmt.Println(search_query_injection.Category().Id, "-->", search_query_injection.Category().Title, "--> with tags:", search_query_injection.SupportedTags()) - fmt.Println(server_side_request_forgery.Category().Id, "-->", server_side_request_forgery.Category().Title, "--> with tags:", server_side_request_forgery.SupportedTags()) - fmt.Println(service_registry_poisoning.Category().Id, "-->", service_registry_poisoning.Category().Title, "--> with tags:", service_registry_poisoning.SupportedTags()) - fmt.Println(sql_nosql_injection.Category().Id, "-->", sql_nosql_injection.Category().Title, "--> with tags:", sql_nosql_injection.SupportedTags()) - fmt.Println(unchecked_deployment.Category().Id, "-->", unchecked_deployment.Category().Title, "--> with tags:", unchecked_deployment.SupportedTags()) - 
fmt.Println(unencrypted_asset.Category().Id, "-->", unencrypted_asset.Category().Title, "--> with tags:", unencrypted_asset.SupportedTags()) - fmt.Println(unencrypted_communication.Category().Id, "-->", unencrypted_communication.Category().Title, "--> with tags:", unencrypted_communication.SupportedTags()) - fmt.Println(unguarded_access_from_internet.Category().Id, "-->", unguarded_access_from_internet.Category().Title, "--> with tags:", unguarded_access_from_internet.SupportedTags()) - fmt.Println(unguarded_direct_datastore_access.Category().Id, "-->", unguarded_direct_datastore_access.Category().Title, "--> with tags:", unguarded_direct_datastore_access.SupportedTags()) - fmt.Println(unnecessary_communication_link.Category().Id, "-->", unnecessary_communication_link.Category().Title, "--> with tags:", unnecessary_communication_link.SupportedTags()) - fmt.Println(unnecessary_data_asset.Category().Id, "-->", unnecessary_data_asset.Category().Title, "--> with tags:", unnecessary_data_asset.SupportedTags()) - fmt.Println(unnecessary_data_transfer.Category().Id, "-->", unnecessary_data_transfer.Category().Title, "--> with tags:", unnecessary_data_transfer.SupportedTags()) - fmt.Println(unnecessary_technical_asset.Category().Id, "-->", unnecessary_technical_asset.Category().Title, "--> with tags:", unnecessary_technical_asset.SupportedTags()) - fmt.Println(untrusted_deserialization.Category().Id, "-->", untrusted_deserialization.Category().Title, "--> with tags:", untrusted_deserialization.SupportedTags()) - fmt.Println(wrong_communication_link_content.Category().Id, "-->", wrong_communication_link_content.Category().Title, "--> with tags:", wrong_communication_link_content.SupportedTags()) - fmt.Println(wrong_trust_boundary_content.Category().Id, "-->", wrong_trust_boundary_content.Category().Title, "--> with tags:", wrong_trust_boundary_content.SupportedTags()) - fmt.Println(xml_external_entity.Category().Id, "-->", xml_external_entity.Category().Title, "--> with 
tags:", xml_external_entity.SupportedTags()) - fmt.Println() - os.Exit(0) - } - if *explainTypes { - printLogo() - fmt.Println("Explanation for the types:") - fmt.Println() - printExplainTypes("Authentication", model.AuthenticationValues()) - printExplainTypes("Authorization", model.AuthorizationValues()) - printExplainTypes("Confidentiality", model.ConfidentialityValues()) - printExplainTypes("Criticality", model.CriticalityValues()) - printExplainTypes("Data Breach Probability", model.DataBreachProbabilityValues()) - printExplainTypes("Data Format", model.DataFormatValues()) - printExplainTypes("Encryption", model.EncryptionStyleValues()) - printExplainTypes("Protocol", model.ProtocolValues()) - printExplainTypes("Quantity", model.QuantityValues()) - printExplainTypes("Risk Exploitation Impact", model.RiskExploitationImpactValues()) - printExplainTypes("Risk Exploitation likelihood", model.RiskExploitationLikelihoodValues()) - printExplainTypes("Risk Function", model.RiskFunctionValues()) - printExplainTypes("Risk Severity", model.RiskSeverityValues()) - printExplainTypes("Risk Status", model.RiskStatusValues()) - printExplainTypes("STRIDE", model.STRIDEValues()) - printExplainTypes("Technical Asset Machine", model.TechnicalAssetMachineValues()) - printExplainTypes("Technical Asset Size", model.TechnicalAssetSizeValues()) - printExplainTypes("Technical Asset Technology", model.TechnicalAssetTechnologyValues()) - printExplainTypes("Technical Asset Type", model.TechnicalAssetTypeValues()) - printExplainTypes("Trust Boundary Type", model.TrustBoundaryTypeValues()) - printExplainTypes("Usage", model.UsageValues()) - - os.Exit(0) - } - if *explainModelMacros { - printLogo() - fmt.Println("Explanation for the model macros:") - fmt.Println() - fmt.Printf("%v: %v\n", add_build_pipeline.GetMacroDetails().ID, add_build_pipeline.GetMacroDetails().Description) - fmt.Printf("%v: %v\n", add_vault.GetMacroDetails().ID, add_vault.GetMacroDetails().Description) - fmt.Printf("%v: 
%v\n", pretty_print.GetMacroDetails().ID, pretty_print.GetMacroDetails().Description) - fmt.Printf("%v: %v\n", remove_unused_tags.GetMacroDetails().ID, remove_unused_tags.GetMacroDetails().Description) - fmt.Printf("%v: %v\n", seed_risk_tracking.GetMacroDetails().ID, seed_risk_tracking.GetMacroDetails().Description) - fmt.Printf("%v: %v\n", seed_tags.GetMacroDetails().ID, seed_tags.GetMacroDetails().Description) - fmt.Println() - os.Exit(0) - - } - if *explainRiskRules { - printLogo() - fmt.Println("Explanation for risk rules:") - fmt.Println() - fmt.Printf("%v: %v\n", accidental_secret_leak.Category().Id, accidental_secret_leak.Category().Description) - fmt.Printf("%v: %v\n", code_backdooring.Category().Id, code_backdooring.Category().Description) - fmt.Printf("%v: %v\n", container_baseimage_backdooring.Category().Id, container_baseimage_backdooring.Category().Description) - fmt.Printf("%v: %v\n", container_platform_escape.Category().Id, container_platform_escape.Category().Description) - fmt.Printf("%v: %v\n", cross_site_request_forgery.Category().Id, cross_site_request_forgery.Category().Description) - fmt.Printf("%v: %v\n", cross_site_scripting.Category().Id, cross_site_scripting.Category().Description) - fmt.Printf("%v: %v\n", dos_risky_access_across_trust_boundary.Category().Id, dos_risky_access_across_trust_boundary.Category().Description) - fmt.Printf("%v: %v\n", incomplete_model.Category().Id, incomplete_model.Category().Description) - fmt.Printf("%v: %v\n", ldap_injection.Category().Id, ldap_injection.Category().Description) - fmt.Printf("%v: %v\n", missing_authentication.Category().Id, missing_authentication.Category().Description) - fmt.Printf("%v: %v\n", missing_authentication_second_factor.Category().Id, missing_authentication_second_factor.Category().Description) - fmt.Printf("%v: %v\n", missing_build_infrastructure.Category().Id, missing_build_infrastructure.Category().Description) - fmt.Printf("%v: %v\n", missing_cloud_hardening.Category().Id, 
missing_cloud_hardening.Category().Description) - fmt.Printf("%v: %v\n", missing_file_validation.Category().Id, missing_file_validation.Category().Description) - fmt.Printf("%v: %v\n", missing_hardening.Category().Id, missing_hardening.Category().Description) - fmt.Printf("%v: %v\n", missing_identity_propagation.Category().Id, missing_identity_propagation.Category().Description) - fmt.Printf("%v: %v\n", missing_identity_provider_isolation.Category().Id, missing_identity_provider_isolation.Category().Description) - fmt.Printf("%v: %v\n", missing_identity_store.Category().Id, missing_identity_store.Category().Description) - fmt.Printf("%v: %v\n", missing_network_segmentation.Category().Id, missing_network_segmentation.Category().Description) - fmt.Printf("%v: %v\n", missing_vault.Category().Id, missing_vault.Category().Description) - fmt.Printf("%v: %v\n", missing_vault_isolation.Category().Id, missing_vault_isolation.Category().Description) - fmt.Printf("%v: %v\n", missing_waf.Category().Id, missing_waf.Category().Description) - fmt.Printf("%v: %v\n", mixed_targets_on_shared_runtime.Category().Id, mixed_targets_on_shared_runtime.Category().Description) - fmt.Printf("%v: %v\n", path_traversal.Category().Id, path_traversal.Category().Description) - fmt.Printf("%v: %v\n", push_instead_of_pull_deployment.Category().Id, push_instead_of_pull_deployment.Category().Description) - fmt.Printf("%v: %v\n", search_query_injection.Category().Id, search_query_injection.Category().Description) - fmt.Printf("%v: %v\n", server_side_request_forgery.Category().Id, server_side_request_forgery.Category().Description) - fmt.Printf("%v: %v\n", service_registry_poisoning.Category().Id, service_registry_poisoning.Category().Description) - fmt.Printf("%v: %v\n", sql_nosql_injection.Category().Id, sql_nosql_injection.Category().Description) - fmt.Printf("%v: %v\n", unchecked_deployment.Category().Id, unchecked_deployment.Category().Description) - fmt.Printf("%v: %v\n", 
unencrypted_asset.Category().Id, unencrypted_asset.Category().Description) - fmt.Printf("%v: %v\n", unencrypted_communication.Category().Id, unencrypted_communication.Category().Description) - fmt.Printf("%v: %v\n", unguarded_access_from_internet.Category().Id, unguarded_access_from_internet.Category().Description) - fmt.Printf("%v: %v\n", unguarded_direct_datastore_access.Category().Id, unguarded_direct_datastore_access.Category().Description) - fmt.Printf("%v: %v\n", unnecessary_communication_link.Category().Id, unnecessary_communication_link.Category().Description) - fmt.Printf("%v: %v\n", unnecessary_data_asset.Category().Id, unnecessary_data_asset.Category().Description) - fmt.Printf("%v: %v\n", unnecessary_data_transfer.Category().Id, unnecessary_data_transfer.Category().Description) - fmt.Printf("%v: %v\n", unnecessary_technical_asset.Category().Id, unnecessary_technical_asset.Category().Description) - fmt.Printf("%v: %v\n", untrusted_deserialization.Category().Id, untrusted_deserialization.Category().Description) - fmt.Printf("%v: %v\n", wrong_communication_link_content.Category().Id, wrong_communication_link_content.Category().Description) - fmt.Printf("%v: %v\n", wrong_trust_boundary_content.Category().Id, wrong_trust_boundary_content.Category().Description) - fmt.Printf("%v: %v\n", xml_external_entity.Category().Id, xml_external_entity.Category().Description) - fmt.Println() - os.Exit(0) - } - if *print3rdParty { - printLogo() - fmt.Println("Kudos & Credits to the following open-source projects:") - fmt.Println(" - golang (Google Go License): https://golang.org/LICENSE") - fmt.Println(" - go-yaml (MIT License): https://github.com/go-yaml/yaml/blob/v3/LICENSE") - fmt.Println(" - graphviz (CPL License): https://graphviz.gitlab.io/license/") - fmt.Println(" - gofpdf (MIT License): https://github.com/jung-kurt/gofpdf/blob/master/LICENSE") - fmt.Println(" - go-chart (MIT License): https://github.com/wcharczuk/go-chart/blob/master/LICENSE") - fmt.Println(" - 
excelize (BSD License): https://github.com/qax-os/excelize/blob/master/LICENSE") - fmt.Println(" - graphics-go (BSD License): https://github.com/BurntSushi/graphics-go/blob/master/LICENSE") - fmt.Println(" - google-uuid (BSD License): https://github.com/google/uuid/blob/master/LICENSE") - fmt.Println(" - gin-gonic (MIT License): https://github.com/gin-gonic/gin/blob/master/LICENSE") - fmt.Println(" - swagger-ui (Apache License): https://swagger.io/license/") - fmt.Println() - os.Exit(0) - } - if *license { - printLogo() - content, err := ioutil.ReadFile("/app/LICENSE.txt") - checkErr(err) - fmt.Print(string(content)) - fmt.Println() - os.Exit(0) - } - if *createExampleModel { - createExampleModelFile() - printLogo() - fmt.Println("An example model was created named threagile-example-model.yaml in the output directory.") - fmt.Println() - printExamples() - fmt.Println() - os.Exit(0) - } - if *createStubModel { - createStubModelFile() - printLogo() - fmt.Println("A minimal stub model was created named threagile-stub-model.yaml in the output directory.") - fmt.Println() - printExamples() - fmt.Println() - os.Exit(0) - } - if *createEditingSupport { - createEditingSupportFiles() - printLogo() - fmt.Println("The following files were created in the output directory:") - fmt.Println(" - schema.json") - fmt.Println(" - live-templates.txt") - fmt.Println() - fmt.Println("For a perfect editing experience within your IDE of choice you can easily get " + - "model syntax validation and autocompletion (very handy for enum values) as well as live templates: " + - "Just import the schema.json into your IDE and assign it as \"schema\" to each Threagile YAML file. 
" + - "Also try to import individual parts from the live-templates.txt file into your IDE as live editing templates.") - fmt.Println() - os.Exit(0) - } -} - -func printLogo() { - fmt.Println() - fmt.Println(" _____ _ _ _ \n |_ _| |__ _ __ ___ __ _ __ _(_) | ___ \n | | | '_ \\| '__/ _ \\/ _` |/ _` | | |/ _ \\\n | | | | | | | | __/ (_| | (_| | | | __/\n |_| |_| |_|_| \\___|\\__,_|\\__, |_|_|\\___|\n |___/ ") - fmt.Println("Threagile - Agile Threat Modeling") - fmt.Println() - fmt.Println() - printVersion() -} - -func printVersion() { - fmt.Println("Documentation: https://threagile.io") - fmt.Println("Docker Images: https://hub.docker.com/r/threagile/threagile") - fmt.Println("Sourcecode: https://github.com/threagile") - fmt.Println("License: Open-Source (MIT License)") - fmt.Println("Version: " + model.ThreagileVersion + " (" + buildTimestamp + ")") - fmt.Println() - fmt.Println() -} - -func createExampleModelFile() { - copyFile("/app/threagile-example-model.yaml", *outputDir+"/threagile-example-model.yaml") -} - -func createStubModelFile() { - loadCustomRiskRules() - stub, err := ioutil.ReadFile("/app/threagile-stub-model.yaml") - checkErr(err) - err = ioutil.WriteFile(*outputDir+"/threagile-stub-model.yaml", addSupportedTags(stub), 0644) - checkErr(err) -} - -func createEditingSupportFiles() { - copyFile("/app/schema.json", *outputDir+"/schema.json") - copyFile("/app/live-templates.txt", *outputDir+"/live-templates.txt") -} - -func printExamples() { - fmt.Println("If you want to execute Threagile on a model yaml file (via docker): ") - fmt.Println(" docker run --rm -it " + - "-v \"$(pwd)\":/app/work " + - "threagile/threagile " + - "-verbose " + - "-model /app/work/threagile.yaml " + - "-output /app/work") - fmt.Println() - fmt.Println("If you want to run Threagile as a server (REST API) on some port (here 8080): ") - fmt.Println(" docker run --rm -it " + - "--shm-size=256m " + - "-p 8080:8080 " + - "--name threagile-server " + - "--mount 
'type=volume,src=threagile-storage,dst=/data,readonly=false' " + - "threagile/threagile -server 8080") - fmt.Println() - fmt.Println("If you want to find out about the different enum values usable in the model yaml file: ") - fmt.Println(" docker run --rm -it threagile/threagile -list-types") - fmt.Println() - fmt.Println("If you want to use some nice editing help (syntax validation, autocompletion, and live templates) in your favourite IDE: ") - fmt.Println(" docker run --rm -it -v \"$(pwd)\":/app/work threagile/threagile -create-editing-support -output /app/work") - fmt.Println() - fmt.Println("If you want to list all available model macros (which are macros capable of reading a model yaml file, asking you questions in a wizard-style and then update the model yaml file accordingly): ") - fmt.Println(" docker run --rm -it threagile/threagile -list-model-macros") - fmt.Println() - fmt.Println("If you want to execute a certain model macro on the model yaml file (here the macro add-build-pipeline): ") - fmt.Println(" docker run --rm -it -v \"$(pwd)\":/app/work threagile/threagile -model /app/work/threagile.yaml -output /app/work -execute-model-macro add-build-pipeline") -} - -func printTypes(title string, value interface{}) { - fmt.Println(fmt.Sprintf(" %v: %v", title, value)) -} - -// explainTypes prints and explanation block and a header -func printExplainTypes(title string, value []model.TypeEnum) { - fmt.Println(title) - for _, candidate := range value { - fmt.Printf("\t %v: %v\n", candidate, candidate.Explain()) - } -} - -func copyFile(src, dst string) (int64, error) { - sourceFileStat, err := os.Stat(src) - if err != nil { - return 0, err - } - - if !sourceFileStat.Mode().IsRegular() { - return 0, fmt.Errorf("%s is not a regular file", src) - } - - source, err := os.Open(src) - if err != nil { - return 0, err - } - defer source.Close() - - destination, err := os.Create(dst) - if err != nil { - return 0, err - } - defer destination.Close() - nBytes, err := 
io.Copy(destination, source) - return nBytes, err -} - -func parseModel(inputFilename string) { - if *verbose { - fmt.Println("Parsing model:", inputFilename) - } - modelYaml, err := ioutil.ReadFile(inputFilename) - if err == nil { - modelInput = model.ModelInput{} - err = yaml.Unmarshal(modelYaml, &modelInput) - checkErr(err) - //fmt.Println(modelInput) - - var businessCriticality model.Criticality - switch modelInput.Business_criticality { - case model.Archive.String(): - businessCriticality = model.Archive - case model.Operational.String(): - businessCriticality = model.Operational - case model.Important.String(): - businessCriticality = model.Important - case model.Critical.String(): - businessCriticality = model.Critical - case model.MissionCritical.String(): - businessCriticality = model.MissionCritical - default: - panic(errors.New("unknown 'business_criticality' value of application: " + modelInput.Business_criticality)) - } - - reportDate := time.Now() - if len(modelInput.Date) > 0 { - reportDate, err = time.Parse("2006-01-02", modelInput.Date) - if err != nil { - panic(errors.New("unable to parse 'date' value of model file")) - } - } - - model.ParsedModelRoot = model.ParsedModel{ - Author: modelInput.Author, - Title: modelInput.Title, - Date: reportDate, - ManagementSummaryComment: modelInput.Management_summary_comment, - BusinessCriticality: businessCriticality, - BusinessOverview: removePathElementsFromImageFiles(modelInput.Business_overview), - TechnicalOverview: removePathElementsFromImageFiles(modelInput.Technical_overview), - Questions: modelInput.Questions, - AbuseCases: modelInput.Abuse_cases, - SecurityRequirements: modelInput.Security_requirements, - TagsAvailable: lowerCaseAndTrim(modelInput.Tags_available), - DiagramTweakNodesep: modelInput.Diagram_tweak_nodesep, - DiagramTweakRanksep: modelInput.Diagram_tweak_ranksep, - DiagramTweakEdgeLayout: modelInput.Diagram_tweak_edge_layout, - DiagramTweakSuppressEdgeLabels: 
modelInput.Diagram_tweak_suppress_edge_labels, - DiagramTweakLayoutLeftToRight: modelInput.Diagram_tweak_layout_left_to_right, - DiagramTweakInvisibleConnectionsBetweenAssets: modelInput.Diagram_tweak_invisible_connections_between_assets, - DiagramTweakSameRankAssets: modelInput.Diagram_tweak_same_rank_assets, - } - if model.ParsedModelRoot.DiagramTweakNodesep == 0 { - model.ParsedModelRoot.DiagramTweakNodesep = 2 - } - if model.ParsedModelRoot.DiagramTweakRanksep == 0 { - model.ParsedModelRoot.DiagramTweakRanksep = 2 - } - - // Data Assets =============================================================================== - model.ParsedModelRoot.DataAssets = make(map[string]model.DataAsset) - for title, asset := range modelInput.Data_assets { - id := fmt.Sprintf("%v", asset.ID) - - var usage model.Usage - switch asset.Usage { - case model.Business.String(): - usage = model.Business - case model.DevOps.String(): - usage = model.DevOps - default: - panic(errors.New("unknown 'usage' value of data asset '" + title + "': " + asset.Usage)) - } - - var quantity model.Quantity - switch asset.Quantity { - case model.VeryFew.String(): - quantity = model.VeryFew - case model.Few.String(): - quantity = model.Few - case model.Many.String(): - quantity = model.Many - case model.VeryMany.String(): - quantity = model.VeryMany - default: - panic(errors.New("unknown 'quantity' value of data asset '" + title + "': " + asset.Quantity)) - } - - var confidentiality model.Confidentiality - switch asset.Confidentiality { - case model.Public.String(): - confidentiality = model.Public - case model.Internal.String(): - confidentiality = model.Internal - case model.Restricted.String(): - confidentiality = model.Restricted - case model.Confidential.String(): - confidentiality = model.Confidential - case model.StrictlyConfidential.String(): - confidentiality = model.StrictlyConfidential - default: - panic(errors.New("unknown 'confidentiality' value of data asset '" + title + "': " + 
asset.Confidentiality)) - } - - var integrity model.Criticality - switch asset.Integrity { - case model.Archive.String(): - integrity = model.Archive - case model.Operational.String(): - integrity = model.Operational - case model.Important.String(): - integrity = model.Important - case model.Critical.String(): - integrity = model.Critical - case model.MissionCritical.String(): - integrity = model.MissionCritical - default: - panic(errors.New("unknown 'integrity' value of data asset '" + title + "': " + asset.Integrity)) - } - - var availability model.Criticality - switch asset.Availability { - case model.Archive.String(): - availability = model.Archive - case model.Operational.String(): - availability = model.Operational - case model.Important.String(): - availability = model.Important - case model.Critical.String(): - availability = model.Critical - case model.MissionCritical.String(): - availability = model.MissionCritical - default: - panic(errors.New("unknown 'availability' value of data asset '" + title + "': " + asset.Availability)) - } - - checkIdSyntax(id) - if _, exists := model.ParsedModelRoot.DataAssets[id]; exists { - panic(errors.New("duplicate id used: " + id)) - } - model.ParsedModelRoot.DataAssets[id] = model.DataAsset{ - Id: id, - Title: title, - Usage: usage, - Description: withDefault(fmt.Sprintf("%v", asset.Description), title), - Quantity: quantity, - Tags: checkTags(lowerCaseAndTrim(asset.Tags), "data asset '"+title+"'"), - Origin: fmt.Sprintf("%v", asset.Origin), - Owner: fmt.Sprintf("%v", asset.Owner), - Confidentiality: confidentiality, - Integrity: integrity, - Availability: availability, - JustificationCiaRating: fmt.Sprintf("%v", asset.Justification_cia_rating), - } - } - - // Technical Assets =============================================================================== - model.ParsedModelRoot.TechnicalAssets = make(map[string]model.TechnicalAsset) - for title, asset := range modelInput.Technical_assets { - id := fmt.Sprintf("%v", 
asset.ID) - - var usage model.Usage - switch asset.Usage { - case model.Business.String(): - usage = model.Business - case model.DevOps.String(): - usage = model.DevOps - default: - panic(errors.New("unknown 'usage' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Usage))) - } - - var dataAssetsProcessed = make([]string, 0) - if asset.Data_assets_processed != nil { - dataAssetsProcessed = make([]string, len(asset.Data_assets_processed)) - for i, parsedProcessedAsset := range asset.Data_assets_processed { - referencedAsset := fmt.Sprintf("%v", parsedProcessedAsset) - checkDataAssetTargetExists(referencedAsset, "technical asset '"+title+"'") - dataAssetsProcessed[i] = referencedAsset - } - } - - var dataAssetsStored = make([]string, 0) - if asset.Data_assets_stored != nil { - dataAssetsStored = make([]string, len(asset.Data_assets_stored)) - for i, parsedStoredAssets := range asset.Data_assets_stored { - referencedAsset := fmt.Sprintf("%v", parsedStoredAssets) - checkDataAssetTargetExists(referencedAsset, "technical asset '"+title+"'") - dataAssetsStored[i] = referencedAsset - } - } - - var technicalAssetType model.TechnicalAssetType - switch asset.Type { - case model.ExternalEntity.String(): - technicalAssetType = model.ExternalEntity - case model.Process.String(): - technicalAssetType = model.Process - case model.Datastore.String(): - technicalAssetType = model.Datastore - default: - panic(errors.New("unknown 'type' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Type))) - } - - var technicalAssetSize model.TechnicalAssetSize - switch asset.Size { - case model.Service.String(): - technicalAssetSize = model.Service - case model.System.String(): - technicalAssetSize = model.System - case model.Application.String(): - technicalAssetSize = model.Application - case model.Component.String(): - technicalAssetSize = model.Component - default: - panic(errors.New("unknown 'size' value of technical asset '" + title + "': " + 
fmt.Sprintf("%v", asset.Size))) - } - - var technicalAssetTechnology model.TechnicalAssetTechnology - switch asset.Technology { - case model.UnknownTechnology.String(): - technicalAssetTechnology = model.UnknownTechnology - case model.ClientSystem.String(): - technicalAssetTechnology = model.ClientSystem - case model.Browser.String(): - technicalAssetTechnology = model.Browser - case model.Desktop.String(): - technicalAssetTechnology = model.Desktop - case model.MobileApp.String(): - technicalAssetTechnology = model.MobileApp - case model.DevOpsClient.String(): - technicalAssetTechnology = model.DevOpsClient - case model.WebServer.String(): - technicalAssetTechnology = model.WebServer - case model.WebApplication.String(): - technicalAssetTechnology = model.WebApplication - case model.ApplicationServer.String(): - technicalAssetTechnology = model.ApplicationServer - case model.Database.String(): - technicalAssetTechnology = model.Database - case model.FileServer.String(): - technicalAssetTechnology = model.FileServer - case model.LocalFileSystem.String(): - technicalAssetTechnology = model.LocalFileSystem - case model.ERP.String(): - technicalAssetTechnology = model.ERP - case model.CMS.String(): - technicalAssetTechnology = model.CMS - case model.WebServiceREST.String(): - technicalAssetTechnology = model.WebServiceREST - case model.WebServiceSOAP.String(): - technicalAssetTechnology = model.WebServiceSOAP - case model.EJB.String(): - technicalAssetTechnology = model.EJB - case model.SearchIndex.String(): - technicalAssetTechnology = model.SearchIndex - case model.SearchEngine.String(): - technicalAssetTechnology = model.SearchEngine - case model.ServiceRegistry.String(): - technicalAssetTechnology = model.ServiceRegistry - case model.ReverseProxy.String(): - technicalAssetTechnology = model.ReverseProxy - case model.LoadBalancer.String(): - technicalAssetTechnology = model.LoadBalancer - case model.BuildPipeline.String(): - technicalAssetTechnology = 
model.BuildPipeline - case model.SourcecodeRepository.String(): - technicalAssetTechnology = model.SourcecodeRepository - case model.ArtifactRegistry.String(): - technicalAssetTechnology = model.ArtifactRegistry - case model.CodeInspectionPlatform.String(): - technicalAssetTechnology = model.CodeInspectionPlatform - case model.Monitoring.String(): - technicalAssetTechnology = model.Monitoring - case model.LDAPServer.String(): - technicalAssetTechnology = model.LDAPServer - case model.ContainerPlatform.String(): - technicalAssetTechnology = model.ContainerPlatform - case model.BatchProcessing.String(): - technicalAssetTechnology = model.BatchProcessing - case model.EventListener.String(): - technicalAssetTechnology = model.EventListener - case model.IdentityProvider.String(): - technicalAssetTechnology = model.IdentityProvider - case model.IdentityStoreLDAP.String(): - technicalAssetTechnology = model.IdentityStoreLDAP - case model.IdentityStoreDatabase.String(): - technicalAssetTechnology = model.IdentityStoreDatabase - case model.Tool.String(): - technicalAssetTechnology = model.Tool - case model.CLI.String(): - technicalAssetTechnology = model.CLI - case model.Task.String(): - technicalAssetTechnology = model.Task - case model.Function.String(): - technicalAssetTechnology = model.Function - case model.Gateway.String(): - technicalAssetTechnology = model.Gateway - case model.IoTDevice.String(): - technicalAssetTechnology = model.IoTDevice - case model.MessageQueue.String(): - technicalAssetTechnology = model.MessageQueue - case model.StreamProcessing.String(): - technicalAssetTechnology = model.StreamProcessing - case model.ServiceMesh.String(): - technicalAssetTechnology = model.ServiceMesh - case model.DataLake.String(): - technicalAssetTechnology = model.DataLake - case model.BigDataPlatform.String(): - technicalAssetTechnology = model.BigDataPlatform - case model.ReportEngine.String(): - technicalAssetTechnology = model.ReportEngine - case model.AI.String(): - 
technicalAssetTechnology = model.AI - case model.MailServer.String(): - technicalAssetTechnology = model.MailServer - case model.Vault.String(): - technicalAssetTechnology = model.Vault - case model.HSM.String(): - technicalAssetTechnology = model.HSM - case model.WAF.String(): - technicalAssetTechnology = model.WAF - case model.IDS.String(): - technicalAssetTechnology = model.IDS - case model.IPS.String(): - technicalAssetTechnology = model.IPS - case model.Scheduler.String(): - technicalAssetTechnology = model.Scheduler - case model.Mainframe.String(): - technicalAssetTechnology = model.Mainframe - case model.BlockStorage.String(): - technicalAssetTechnology = model.BlockStorage - case model.Library.String(): - technicalAssetTechnology = model.Library - default: - panic(errors.New("unknown 'technology' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Technology))) - } - - var encryption model.EncryptionStyle - switch asset.Encryption { - case model.NoneEncryption.String(): - encryption = model.NoneEncryption - case model.Transparent.String(): - encryption = model.Transparent - case model.DataWithSymmetricSharedKey.String(): - encryption = model.DataWithSymmetricSharedKey - case model.DataWithAsymmetricSharedKey.String(): - encryption = model.DataWithAsymmetricSharedKey - case model.DataWithEnduserIndividualKey.String(): - encryption = model.DataWithEnduserIndividualKey - default: - panic(errors.New("unknown 'encryption' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Encryption))) - } - - var technicalAssetMachine model.TechnicalAssetMachine - switch asset.Machine { - case model.Physical.String(): - technicalAssetMachine = model.Physical - case model.Virtual.String(): - technicalAssetMachine = model.Virtual - case model.Container.String(): - technicalAssetMachine = model.Container - case model.Serverless.String(): - technicalAssetMachine = model.Serverless - default: - panic(errors.New("unknown 'machine' value of 
technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Machine))) - } - - var confidentiality model.Confidentiality - switch asset.Confidentiality { - case model.Public.String(): - confidentiality = model.Public - case model.Internal.String(): - confidentiality = model.Internal - case model.Restricted.String(): - confidentiality = model.Restricted - case model.Confidential.String(): - confidentiality = model.Confidential - case model.StrictlyConfidential.String(): - confidentiality = model.StrictlyConfidential - default: - panic(errors.New("unknown 'confidentiality' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Confidentiality))) - } - - var integrity model.Criticality - switch asset.Integrity { - case model.Archive.String(): - integrity = model.Archive - case model.Operational.String(): - integrity = model.Operational - case model.Important.String(): - integrity = model.Important - case model.Critical.String(): - integrity = model.Critical - case model.MissionCritical.String(): - integrity = model.MissionCritical - default: - panic(errors.New("unknown 'integrity' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Integrity))) - } - - var availability model.Criticality - switch asset.Availability { - case model.Archive.String(): - availability = model.Archive - case model.Operational.String(): - availability = model.Operational - case model.Important.String(): - availability = model.Important - case model.Critical.String(): - availability = model.Critical - case model.MissionCritical.String(): - availability = model.MissionCritical - default: - panic(errors.New("unknown 'availability' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Availability))) - } - - dataFormatsAccepted := make([]model.DataFormat, 0) - if asset.Data_formats_accepted != nil { - for _, dataFormatName := range asset.Data_formats_accepted { - switch dataFormatName { - case model.JSON.String(): - dataFormatsAccepted = 
append(dataFormatsAccepted, model.JSON) - case model.XML.String(): - dataFormatsAccepted = append(dataFormatsAccepted, model.XML) - case model.Serialization.String(): - dataFormatsAccepted = append(dataFormatsAccepted, model.Serialization) - case model.File.String(): - dataFormatsAccepted = append(dataFormatsAccepted, model.File) - case model.CSV.String(): - dataFormatsAccepted = append(dataFormatsAccepted, model.CSV) - default: - panic(errors.New("unknown 'data_formats_accepted' value of technical asset '" + title + "': " + fmt.Sprintf("%v", dataFormatName))) - } - } - } - - communicationLinks := make([]model.CommunicationLink, 0) - if asset.Communication_links != nil { - for commLinkTitle, commLink := range asset.Communication_links { - constraint := true - weight := 1 - var protocol model.Protocol - var authentication model.Authentication - var authorization model.Authorization - var usage model.Usage - var dataAssetsSent []string - var dataAssetsReceived []string - - switch commLink.Authentication { - case model.NoneAuthentication.String(): - authentication = model.NoneAuthentication - case model.Credentials.String(): - authentication = model.Credentials - case model.SessionId.String(): - authentication = model.SessionId - case model.Token.String(): - authentication = model.Token - case model.ClientCertificate.String(): - authentication = model.ClientCertificate - case model.TwoFactor.String(): - authentication = model.TwoFactor - case model.Externalized.String(): - authentication = model.Externalized - default: - panic(errors.New("unknown 'authentication' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Authentication))) - } - - switch commLink.Authorization { - case model.NoneAuthorization.String(): - authorization = model.NoneAuthorization - case model.TechnicalUser.String(): - authorization = model.TechnicalUser - case model.EnduserIdentityPropagation.String(): - authorization = 
model.EnduserIdentityPropagation - default: - panic(errors.New("unknown 'authorization' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Authorization))) - } - - switch commLink.Usage { - case model.Business.String(): - usage = model.Business - case model.DevOps.String(): - usage = model.DevOps - default: - panic(errors.New("unknown 'usage' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Usage))) - } - - switch commLink.Protocol { - case model.UnknownProtocol.String(): - protocol = model.UnknownProtocol - case model.HTTP.String(): - protocol = model.HTTP - case model.HTTPS.String(): - protocol = model.HTTPS - case model.WS.String(): - protocol = model.WS - case model.WSS.String(): - protocol = model.WSS - case model.MQTT.String(): - protocol = model.MQTT - case model.JDBC.String(): - protocol = model.JDBC - case model.JDBC_encrypted.String(): - protocol = model.JDBC_encrypted - case model.ODBC.String(): - protocol = model.ODBC - case model.ODBC_encrypted.String(): - protocol = model.ODBC_encrypted - case model.SQL_access_protocol.String(): - protocol = model.SQL_access_protocol - case model.SQL_access_protocol_encrypted.String(): - protocol = model.SQL_access_protocol_encrypted - case model.NoSQL_access_protocol.String(): - protocol = model.NoSQL_access_protocol - case model.NoSQL_access_protocol_encrypted.String(): - protocol = model.NoSQL_access_protocol_encrypted - case model.TEXT.String(): - protocol = model.TEXT - case model.TEXT_encrypted.String(): - protocol = model.TEXT_encrypted - case model.BINARY.String(): - protocol = model.BINARY - case model.BINARY_encrypted.String(): - protocol = model.BINARY_encrypted - case model.SSH.String(): - protocol = model.SSH - case model.SSH_tunnel.String(): - protocol = model.SSH_tunnel - case model.SMTP.String(): - protocol = model.SMTP - case model.SMTP_encrypted.String(): - protocol = 
model.SMTP_encrypted - case model.POP3.String(): - protocol = model.POP3 - case model.POP3_encrypted.String(): - protocol = model.POP3_encrypted - case model.IMAP.String(): - protocol = model.IMAP - case model.IMAP_encrypted.String(): - protocol = model.IMAP_encrypted - case model.FTP.String(): - protocol = model.FTP - case model.FTPS.String(): - protocol = model.FTPS - case model.SFTP.String(): - protocol = model.SFTP - case model.SCP.String(): - protocol = model.SCP - case model.LDAP.String(): - protocol = model.LDAP - case model.LDAPS.String(): - protocol = model.LDAPS - case model.JMS.String(): - protocol = model.JMS - case model.NFS.String(): - protocol = model.NFS - case model.SMB.String(): - protocol = model.SMB - case model.SMB_encrypted.String(): - protocol = model.SMB_encrypted - case model.LocalFileAccess.String(): - protocol = model.LocalFileAccess - case model.NRPE.String(): - protocol = model.NRPE - case model.XMPP.String(): - protocol = model.XMPP - case model.IIOP.String(): - protocol = model.IIOP - case model.IIOP_encrypted.String(): - protocol = model.IIOP_encrypted - case model.JRMP.String(): - protocol = model.JRMP - case model.JRMP_encrypted.String(): - protocol = model.JRMP_encrypted - case model.InProcessLibraryCall.String(): - protocol = model.InProcessLibraryCall - case model.ContainerSpawning.String(): - protocol = model.ContainerSpawning - default: - panic(errors.New("unknown 'protocol' of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Protocol))) - } - - if commLink.Data_assets_sent != nil { - for _, dataAssetSent := range commLink.Data_assets_sent { - referencedAsset := fmt.Sprintf("%v", dataAssetSent) - checkDataAssetTargetExists(referencedAsset, "communication link '"+commLinkTitle+"' of technical asset '"+title+"'") - dataAssetsSent = append(dataAssetsSent, referencedAsset) - } - } - - if commLink.Data_assets_received != nil { - for _, dataAssetReceived := range 
commLink.Data_assets_received { - referencedAsset := fmt.Sprintf("%v", dataAssetReceived) - checkDataAssetTargetExists(referencedAsset, "communication link '"+commLinkTitle+"' of technical asset '"+title+"'") - dataAssetsReceived = append(dataAssetsReceived, referencedAsset) - } - } - - if commLink.Diagram_tweak_weight > 0 { - weight = commLink.Diagram_tweak_weight - } - - constraint = !commLink.Diagram_tweak_constraint - - checkErr(err) - - dataFlowTitle := fmt.Sprintf("%v", commLinkTitle) - commLink := model.CommunicationLink{ - Id: createDataFlowId(id, dataFlowTitle), - SourceId: id, - TargetId: commLink.Target, - Title: dataFlowTitle, - Description: withDefault(commLink.Description, dataFlowTitle), - Protocol: protocol, - Authentication: authentication, - Authorization: authorization, - Usage: usage, - Tags: checkTags(lowerCaseAndTrim(commLink.Tags), "communication link '"+commLinkTitle+"' of technical asset '"+title+"'"), - VPN: commLink.VPN, - IpFiltered: commLink.IP_filtered, - Readonly: commLink.Readonly, - DataAssetsSent: dataAssetsSent, - DataAssetsReceived: dataAssetsReceived, - DiagramTweakWeight: weight, - DiagramTweakConstraint: constraint, - } - communicationLinks = append(communicationLinks, commLink) - // track all comm links - model.CommunicationLinks[commLink.Id] = commLink - // keep track of map of *all* comm links mapped by target-id (to be able to lookup "who is calling me" kind of things) - model.IncomingTechnicalCommunicationLinksMappedByTargetId[commLink.TargetId] = append( - model.IncomingTechnicalCommunicationLinksMappedByTargetId[commLink.TargetId], commLink) - } - } - - checkIdSyntax(id) - if _, exists := model.ParsedModelRoot.TechnicalAssets[id]; exists { - panic(errors.New("duplicate id used: " + id)) - } - model.ParsedModelRoot.TechnicalAssets[id] = model.TechnicalAsset{ - Id: id, - Usage: usage, - Title: title, //fmt.Sprintf("%v", asset["title"]), - Description: withDefault(fmt.Sprintf("%v", asset.Description), title), - Type: 
technicalAssetType, - Size: technicalAssetSize, - Technology: technicalAssetTechnology, - Tags: checkTags(lowerCaseAndTrim(asset.Tags), "technical asset '"+title+"'"), - Machine: technicalAssetMachine, - Internet: asset.Internet, - Encryption: encryption, - MultiTenant: asset.Multi_tenant, - Redundant: asset.Redundant, - CustomDevelopedParts: asset.Custom_developed_parts, - UsedAsClientByHuman: asset.Used_as_client_by_human, - OutOfScope: asset.Out_of_scope, - JustificationOutOfScope: fmt.Sprintf("%v", asset.Justification_out_of_scope), - Owner: fmt.Sprintf("%v", asset.Owner), - Confidentiality: confidentiality, - Integrity: integrity, - Availability: availability, - JustificationCiaRating: fmt.Sprintf("%v", asset.Justification_cia_rating), - DataAssetsProcessed: dataAssetsProcessed, - DataAssetsStored: dataAssetsStored, - DataFormatsAccepted: dataFormatsAccepted, - CommunicationLinks: communicationLinks, - DiagramTweakOrder: asset.Diagram_tweak_order, - } - } - - // Trust Boundaries =============================================================================== - checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries := make(map[string]bool) - model.ParsedModelRoot.TrustBoundaries = make(map[string]model.TrustBoundary) - for title, boundary := range modelInput.Trust_boundaries { - id := fmt.Sprintf("%v", boundary.ID) - - var technicalAssetsInside = make([]string, 0) - if boundary.Technical_assets_inside != nil { - parsedInsideAssets := boundary.Technical_assets_inside - technicalAssetsInside = make([]string, len(parsedInsideAssets)) - for i, parsedInsideAsset := range parsedInsideAssets { - technicalAssetsInside[i] = fmt.Sprintf("%v", parsedInsideAsset) - _, found := model.ParsedModelRoot.TechnicalAssets[technicalAssetsInside[i]] - if !found { - panic(errors.New("missing referenced technical asset " + technicalAssetsInside[i] + " at trust boundary '" + title + "'")) - } - if checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries[technicalAssetsInside[i]] 
== true { - panic(errors.New("referenced technical asset " + technicalAssetsInside[i] + " at trust boundary '" + title + "' is modeled in multiple trust boundaries")) - } - checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries[technicalAssetsInside[i]] = true - //fmt.Println("asset "+technicalAssetsInside[i]+" at i="+strconv.Itoa(i)) - } - } - - var trustBoundariesNested = make([]string, 0) - if boundary.Trust_boundaries_nested != nil { - parsedNestedBoundaries := boundary.Trust_boundaries_nested - trustBoundariesNested = make([]string, len(parsedNestedBoundaries)) - for i, parsedNestedBoundary := range parsedNestedBoundaries { - trustBoundariesNested[i] = fmt.Sprintf("%v", parsedNestedBoundary) - } - } - - var trustBoundaryType model.TrustBoundaryType - switch boundary.Type { - case model.NetworkOnPrem.String(): - trustBoundaryType = model.NetworkOnPrem - case model.NetworkDedicatedHoster.String(): - trustBoundaryType = model.NetworkDedicatedHoster - case model.NetworkVirtualLAN.String(): - trustBoundaryType = model.NetworkVirtualLAN - case model.NetworkCloudProvider.String(): - trustBoundaryType = model.NetworkCloudProvider - case model.NetworkCloudSecurityGroup.String(): - trustBoundaryType = model.NetworkCloudSecurityGroup - case model.NetworkPolicyNamespaceIsolation.String(): - trustBoundaryType = model.NetworkPolicyNamespaceIsolation - case model.ExecutionEnvironment.String(): - trustBoundaryType = model.ExecutionEnvironment - default: - panic(errors.New("unknown 'type' of trust boundary '" + title + "': " + fmt.Sprintf("%v", boundary.Type))) - } - - trustBoundary := model.TrustBoundary{ - Id: id, - Title: title, //fmt.Sprintf("%v", boundary["title"]), - Description: withDefault(fmt.Sprintf("%v", boundary.Description), title), - Type: trustBoundaryType, - Tags: checkTags(lowerCaseAndTrim(boundary.Tags), "trust boundary '"+title+"'"), - TechnicalAssetsInside: technicalAssetsInside, - TrustBoundariesNested: trustBoundariesNested, - } - checkIdSyntax(id) - 
if _, exists := model.ParsedModelRoot.TrustBoundaries[id]; exists { - panic(errors.New("duplicate id used: " + id)) - } - model.ParsedModelRoot.TrustBoundaries[id] = trustBoundary - for _, technicalAsset := range trustBoundary.TechnicalAssetsInside { - model.DirectContainingTrustBoundaryMappedByTechnicalAssetId[technicalAsset] = trustBoundary - //fmt.Println("Asset "+technicalAsset+" is directly in trust boundary "+trustBoundary.Id) - } - } - checkNestedTrustBoundariesExisting() - - // Shared Runtime =============================================================================== - model.ParsedModelRoot.SharedRuntimes = make(map[string]model.SharedRuntime) - for title, runtime := range modelInput.Shared_runtimes { - id := fmt.Sprintf("%v", runtime.ID) - - var technicalAssetsRunning = make([]string, 0) - if runtime.Technical_assets_running != nil { - parsedRunningAssets := runtime.Technical_assets_running - technicalAssetsRunning = make([]string, len(parsedRunningAssets)) - for i, parsedRunningAsset := range parsedRunningAssets { - assetId := fmt.Sprintf("%v", parsedRunningAsset) - checkTechnicalAssetExists(assetId, "shared runtime '"+title+"'", false) - technicalAssetsRunning[i] = assetId - } - } - - sharedRuntime := model.SharedRuntime{ - Id: id, - Title: title, //fmt.Sprintf("%v", boundary["title"]), - Description: withDefault(fmt.Sprintf("%v", runtime.Description), title), - Tags: checkTags((runtime.Tags), "shared runtime '"+title+"'"), - TechnicalAssetsRunning: technicalAssetsRunning, - } - checkIdSyntax(id) - if _, exists := model.ParsedModelRoot.SharedRuntimes[id]; exists { - panic(errors.New("duplicate id used: " + id)) - } - model.ParsedModelRoot.SharedRuntimes[id] = sharedRuntime - for _, technicalAssetId := range sharedRuntime.TechnicalAssetsRunning { - model.DirectContainingSharedRuntimeMappedByTechnicalAssetId[technicalAssetId] = sharedRuntime - } - } - - // Individual Risk Categories (just used as regular risk categories) 
=============================================================================== - model.ParsedModelRoot.IndividualRiskCategories = make(map[string]model.RiskCategory) - for title, indivCat := range modelInput.Individual_risk_categories { - id := fmt.Sprintf("%v", indivCat.ID) - - var function model.RiskFunction - switch indivCat.Function { - case model.BusinessSide.String(): - function = model.BusinessSide - case model.Architecture.String(): - function = model.Architecture - case model.Development.String(): - function = model.Development - case model.Operations.String(): - function = model.Operations - default: - panic(errors.New("unknown 'function' value of individual risk category '" + title + "': " + fmt.Sprintf("%v", indivCat.Function))) - } - - var stride model.STRIDE - switch indivCat.STRIDE { - case model.Spoofing.String(): - stride = model.Spoofing - case model.Tampering.String(): - stride = model.Tampering - case model.Repudiation.String(): - stride = model.Repudiation - case model.InformationDisclosure.String(): - stride = model.InformationDisclosure - case model.DenialOfService.String(): - stride = model.DenialOfService - case model.ElevationOfPrivilege.String(): - stride = model.ElevationOfPrivilege - default: - panic(errors.New("unknown 'stride' value of individual risk category '" + title + "': " + fmt.Sprintf("%v", indivCat.STRIDE))) - } - - cat := model.RiskCategory{ - Id: id, - Title: title, - Description: withDefault(fmt.Sprintf("%v", indivCat.Description), title), - Impact: fmt.Sprintf("%v", indivCat.Impact), - ASVS: fmt.Sprintf("%v", indivCat.ASVS), - CheatSheet: fmt.Sprintf("%v", indivCat.Cheat_sheet), - Action: fmt.Sprintf("%v", indivCat.Action), - Mitigation: fmt.Sprintf("%v", indivCat.Mitigation), - Check: fmt.Sprintf("%v", indivCat.Check), - DetectionLogic: fmt.Sprintf("%v", indivCat.Detection_logic), - RiskAssessment: fmt.Sprintf("%v", indivCat.Risk_assessment), - FalsePositives: fmt.Sprintf("%v", indivCat.False_positives), - Function: 
function, - STRIDE: stride, - ModelFailurePossibleReason: indivCat.Model_failure_possible_reason, - CWE: indivCat.CWE, - } - checkIdSyntax(id) - if _, exists := model.ParsedModelRoot.IndividualRiskCategories[id]; exists { - panic(errors.New("duplicate id used: " + id)) - } - model.ParsedModelRoot.IndividualRiskCategories[id] = cat - - // NOW THE INDIVIDUAL RISK INSTANCES: - //individualRiskInstances := make([]model.Risk, 0) - if indivCat.Risks_identified != nil { // TODO: also add syntax checks of input YAML when linked asset is not found or when syntehtic-id is already used... - for title, indivRiskInstance := range indivCat.Risks_identified { - var severity model.RiskSeverity - var exploitationLikelihood model.RiskExploitationLikelihood - var exploitationImpact model.RiskExploitationImpact - var mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId string - var dataBreachProbability model.DataBreachProbability - var dataBreachTechnicalAssetIDs []string - - switch indivRiskInstance.Severity { - case model.LowSeverity.String(): - severity = model.LowSeverity - case model.MediumSeverity.String(): - severity = model.MediumSeverity - case model.ElevatedSeverity.String(): - severity = model.ElevatedSeverity - case model.HighSeverity.String(): - severity = model.HighSeverity - case model.CriticalSeverity.String(): - severity = model.CriticalSeverity - case "": // added default - severity = model.MediumSeverity - default: - panic(errors.New("unknown 'severity' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", indivRiskInstance.Severity))) - } - - switch indivRiskInstance.Exploitation_likelihood { - case model.Unlikely.String(): - exploitationLikelihood = model.Unlikely - case model.Likely.String(): - exploitationLikelihood = model.Likely - case model.VeryLikely.String(): - exploitationLikelihood = model.VeryLikely - case model.Frequent.String(): - 
exploitationLikelihood = model.Frequent - case "": // added default - exploitationLikelihood = model.Likely - default: - panic(errors.New("unknown 'exploitation_likelihood' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", indivRiskInstance.Exploitation_likelihood))) - } - - switch indivRiskInstance.Exploitation_impact { - case model.LowImpact.String(): - exploitationImpact = model.LowImpact - case model.MediumImpact.String(): - exploitationImpact = model.MediumImpact - case model.HighImpact.String(): - exploitationImpact = model.HighImpact - case model.VeryHighImpact.String(): - exploitationImpact = model.VeryHighImpact - case "": // added default - exploitationImpact = model.MediumImpact - default: - panic(errors.New("unknown 'exploitation_impact' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", indivRiskInstance.Exploitation_impact))) - } - - if len(indivRiskInstance.Most_relevant_data_asset) > 0 { - mostRelevantDataAssetId = fmt.Sprintf("%v", indivRiskInstance.Most_relevant_data_asset) - checkDataAssetTargetExists(mostRelevantDataAssetId, "individual risk '"+title+"'") - } - - if len(indivRiskInstance.Most_relevant_technical_asset) > 0 { - mostRelevantTechnicalAssetId = fmt.Sprintf("%v", indivRiskInstance.Most_relevant_technical_asset) - checkTechnicalAssetExists(mostRelevantTechnicalAssetId, "individual risk '"+title+"'", false) - } - - if len(indivRiskInstance.Most_relevant_communication_link) > 0 { - mostRelevantCommunicationLinkId = fmt.Sprintf("%v", indivRiskInstance.Most_relevant_communication_link) - checkCommunicationLinkExists(mostRelevantCommunicationLinkId, "individual risk '"+title+"'") - } - - if len(indivRiskInstance.Most_relevant_trust_boundary) > 0 { - mostRelevantTrustBoundaryId = fmt.Sprintf("%v", indivRiskInstance.Most_relevant_trust_boundary) - checkTrustBoundaryExists(mostRelevantTrustBoundaryId, "individual risk '"+title+"'") - } - - if len(indivRiskInstance.Most_relevant_shared_runtime) > 0 { 
- mostRelevantSharedRuntimeId = fmt.Sprintf("%v", indivRiskInstance.Most_relevant_shared_runtime) - checkSharedRuntimeExists(mostRelevantSharedRuntimeId, "individual risk '"+title+"'") - } - - switch indivRiskInstance.Data_breach_probability { - case model.Improbable.String(): - dataBreachProbability = model.Improbable - case model.Possible.String(): - dataBreachProbability = model.Possible - case model.Probable.String(): - dataBreachProbability = model.Probable - case "": // added default - dataBreachProbability = model.Possible - default: - panic(errors.New("unknown 'data_breach_probability' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", indivRiskInstance.Data_breach_probability))) - } - - if indivRiskInstance.Data_breach_technical_assets != nil { - dataBreachTechnicalAssetIDs = make([]string, len(indivRiskInstance.Data_breach_technical_assets)) - for i, parsedReferencedAsset := range indivRiskInstance.Data_breach_technical_assets { - assetId := fmt.Sprintf("%v", parsedReferencedAsset) - checkTechnicalAssetExists(assetId, "data breach technical assets of individual risk '"+title+"'", false) - dataBreachTechnicalAssetIDs[i] = assetId - } - } - - checkErr(err) - - indivRiskInstance := model.Risk{ - SyntheticId: createSyntheticId(cat.Id, mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId), - Title: fmt.Sprintf("%v", title), - Category: cat, - Severity: severity, - ExploitationLikelihood: exploitationLikelihood, - ExploitationImpact: exploitationImpact, - MostRelevantDataAssetId: mostRelevantDataAssetId, - MostRelevantTechnicalAssetId: mostRelevantTechnicalAssetId, - MostRelevantCommunicationLinkId: mostRelevantCommunicationLinkId, - MostRelevantTrustBoundaryId: mostRelevantTrustBoundaryId, - MostRelevantSharedRuntimeId: mostRelevantSharedRuntimeId, - DataBreachProbability: dataBreachProbability, - DataBreachTechnicalAssetIDs: 
dataBreachTechnicalAssetIDs, - } - model.GeneratedRisksByCategory[cat] = append(model.GeneratedRisksByCategory[cat], indivRiskInstance) - } - } - } - - // Risk Tracking =============================================================================== - model.ParsedModelRoot.RiskTracking = make(map[string]model.RiskTracking) - for syntheticRiskId, riskTracking := range modelInput.Risk_tracking { - justification := fmt.Sprintf("%v", riskTracking.Justification) - checkedBy := fmt.Sprintf("%v", riskTracking.Checked_by) - ticket := fmt.Sprintf("%v", riskTracking.Ticket) - var date time.Time - if len(riskTracking.Date) > 0 { - date, err = time.Parse("2006-01-02", riskTracking.Date) - if err != nil { - panic(errors.New("unable to parse 'date' of risk tracking '" + syntheticRiskId + "': " + riskTracking.Date)) - } - } - - var status model.RiskStatus - switch riskTracking.Status { - case model.Unchecked.String(): - status = model.Unchecked - case model.Mitigated.String(): - status = model.Mitigated - case model.InProgress.String(): - status = model.InProgress - case model.Accepted.String(): - status = model.Accepted - case model.InDiscussion.String(): - status = model.InDiscussion - case model.FalsePositive.String(): - status = model.FalsePositive - default: - panic(errors.New("unknown 'status' value of risk tracking '" + syntheticRiskId + "': " + riskTracking.Status)) - } - - tracking := model.RiskTracking{ - SyntheticRiskId: strings.TrimSpace(syntheticRiskId), - Justification: justification, - CheckedBy: checkedBy, - Ticket: ticket, - Date: date, - Status: status, - } - if strings.Contains(syntheticRiskId, "*") { // contains a wildcard char - deferredRiskTrackingDueToWildcardMatching[syntheticRiskId] = tracking - } else { - model.ParsedModelRoot.RiskTracking[syntheticRiskId] = tracking - } - } - - // ====================== model consistency check (linking) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { - for _, commLink := range 
technicalAsset.CommunicationLinks { - checkTechnicalAssetExists(commLink.TargetId, "communication link '"+commLink.Title+"' of technical asset '"+technicalAsset.Title+"'", false) - } - } - } else { - log.Fatal("Unable to read/parse model yaml: ", err) - } -} - -func lowerCaseAndTrim(tags []string) []string { - for i := range tags { - tags[i] = strings.ToLower(strings.TrimSpace(tags[i])) - } - return tags -} - -func checkTags(tags []string, where string) []string { - var tagsUsed = make([]string, 0) - if tags != nil { - tagsUsed = make([]string, len(tags)) - for i, parsedEntry := range tags { - referencedTag := fmt.Sprintf("%v", parsedEntry) - checkTagExists(referencedTag, where) - tagsUsed[i] = referencedTag - } - } - return tagsUsed -} - -// in order to prevent Path-Traversal like stuff... -func removePathElementsFromImageFiles(overview model.Overview) model.Overview { - for i, _ := range overview.Images { - newValue := make(map[string]string) - for file, desc := range overview.Images[i] { - newValue[filepath.Base(file)] = desc - } - overview.Images[i] = newValue - } - return overview -} - -func applyWildcardRiskTrackingEvaluation() { - if *verbose { - fmt.Println("Executing risk tracking evaluation") - } - for syntheticRiskIdPattern, riskTracking := range deferredRiskTrackingDueToWildcardMatching { - foundSome := false - var matchingRiskIdExpression = regexp.MustCompile(strings.ReplaceAll(regexp.QuoteMeta(syntheticRiskIdPattern), `\*`, `[^@]+`)) - for syntheticRiskId, _ := range model.GeneratedRisksBySyntheticId { - if matchingRiskIdExpression.Match([]byte(syntheticRiskId)) && hasNotYetAnyDirectNonWildcardRiskTrackings(syntheticRiskId) { - foundSome = true - model.ParsedModelRoot.RiskTracking[syntheticRiskId] = model.RiskTracking{ - SyntheticRiskId: strings.TrimSpace(syntheticRiskId), - Justification: riskTracking.Justification, - CheckedBy: riskTracking.CheckedBy, - Ticket: riskTracking.Ticket, - Status: riskTracking.Status, - Date: riskTracking.Date, - } - } - 
} - if !foundSome { - if *ignoreOrphanedRiskTracking { - fmt.Println("Wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern) - } else { - panic(errors.New("wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern)) - } - } - } -} - -func hasNotYetAnyDirectNonWildcardRiskTrackings(syntheticRiskId string) bool { - if _, ok := model.ParsedModelRoot.RiskTracking[syntheticRiskId]; ok { - return false - } - return true -} - -func withDefault(value string, defaultWhenEmpty string) string { - trimmed := strings.TrimSpace(value) - if len(trimmed) > 0 && trimmed != "" { - return trimmed - } - return strings.TrimSpace(defaultWhenEmpty) -} - -func createDataFlowId(sourceAssetId, title string) string { - reg, err := regexp.Compile("[^A-Za-z0-9]+") - checkErr(err) - return sourceAssetId + ">" + strings.Trim(reg.ReplaceAllString(strings.ToLower(title), "-"), "- ") -} - -func createSyntheticId(categoryId string, - mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId string) string { - result := categoryId - if len(mostRelevantTechnicalAssetId) > 0 { - result += "@" + mostRelevantTechnicalAssetId - } - if len(mostRelevantCommunicationLinkId) > 0 { - result += "@" + mostRelevantCommunicationLinkId - } - if len(mostRelevantTrustBoundaryId) > 0 { - result += "@" + mostRelevantTrustBoundaryId - } - if len(mostRelevantSharedRuntimeId) > 0 { - result += "@" + mostRelevantSharedRuntimeId - } - if len(mostRelevantDataAssetId) > 0 { - result += "@" + mostRelevantDataAssetId - } - return result -} - -func checkTagExists(referencedTag, where string) { - if !model.Contains(model.ParsedModelRoot.TagsAvailable, referencedTag) { - panic(errors.New("missing referenced tag in overall tag list at " + where + ": " + referencedTag)) - } -} - -func checkDataAssetTargetExists(referencedAsset, where string) { - if _, ok := 
model.ParsedModelRoot.DataAssets[referencedAsset]; !ok { - panic(errors.New("missing referenced data asset target at " + where + ": " + referencedAsset)) - } -} - -func checkTrustBoundaryExists(referencedId, where string) { - if _, ok := model.ParsedModelRoot.TrustBoundaries[referencedId]; !ok { - panic(errors.New("missing referenced trust boundary at " + where + ": " + referencedId)) - } -} - -func checkSharedRuntimeExists(referencedId, where string) { - if _, ok := model.ParsedModelRoot.SharedRuntimes[referencedId]; !ok { - panic(errors.New("missing referenced shared runtime at " + where + ": " + referencedId)) - } -} - -func checkCommunicationLinkExists(referencedId, where string) { - if _, ok := model.CommunicationLinks[referencedId]; !ok { - panic(errors.New("missing referenced communication link at " + where + ": " + referencedId)) - } -} - -func checkTechnicalAssetExists(referencedAsset, where string, onlyForTweak bool) { - if _, ok := model.ParsedModelRoot.TechnicalAssets[referencedAsset]; !ok { - suffix := "" - if onlyForTweak { - suffix = " (only referenced in diagram tweak)" - } - panic(errors.New("missing referenced technical asset target" + suffix + " at " + where + ": " + referencedAsset)) - } -} - -func checkNestedTrustBoundariesExisting() { - for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries { - for _, nestedId := range trustBoundary.TrustBoundariesNested { - if _, ok := model.ParsedModelRoot.TrustBoundaries[nestedId]; !ok { - panic(errors.New("missing referenced nested trust boundary: " + nestedId)) - } - } - } -} - -func hash(s string) string { - h := fnv.New32a() - h.Write([]byte(s)) - return fmt.Sprintf("%v", h.Sum32()) -} - -func writeDataAssetDiagramGraphvizDOT(diagramFilenameDOT string, dpi int) *os.File { - if *verbose { - fmt.Println("Writing data asset diagram input") - } - var dotContent strings.Builder - dotContent.WriteString("digraph generatedModel { concentrate=true \n") - - // Metadata init 
=============================================================================== - dotContent.WriteString(` graph [ - dpi=` + strconv.Itoa(dpi) + ` - fontname="Verdana" - labelloc="c" - fontsize="20" - splines=false - rankdir="LR" - nodesep=1.0 - ranksep=3.0 - outputorder="nodesfirst" - ]; - node [ - fontcolor="white" - fontname="Verdana" - fontsize="20" - ]; - edge [ - shape="none" - fontname="Verdana" - fontsize="18" - ]; -`) - - // Technical Assets =============================================================================== - techAssets := make([]model.TechnicalAsset, 0) - for _, techAsset := range model.ParsedModelRoot.TechnicalAssets { - techAssets = append(techAssets, techAsset) - } - sort.Sort(model.ByOrderAndIdSort(techAssets)) - for _, technicalAsset := range techAssets { - if len(technicalAsset.DataAssetsStored) > 0 || len(technicalAsset.DataAssetsProcessed) > 0 { - dotContent.WriteString(makeTechAssetNode(technicalAsset, true)) - dotContent.WriteString("\n") - } - } - - // Data Assets =============================================================================== - dataAssets := make([]model.DataAsset, 0) - for _, dataAsset := range model.ParsedModelRoot.DataAssets { - dataAssets = append(dataAssets, dataAsset) - } - sort.Sort(model.ByDataAssetDataBreachProbabilityAndTitleSort(dataAssets)) - for _, dataAsset := range dataAssets { - dotContent.WriteString(makeDataAssetNode(dataAsset)) - dotContent.WriteString("\n") - } - - // Data Asset to Tech Asset links =============================================================================== - for _, technicalAsset := range techAssets { - for _, sourceId := range technicalAsset.DataAssetsStored { - targetId := technicalAsset.Id - dotContent.WriteString("\n") - dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) + - ` [ color="blue" style="solid" ];`) - dotContent.WriteString("\n") - } - for _, sourceId := range technicalAsset.DataAssetsProcessed { - if 
!model.Contains(technicalAsset.DataAssetsStored, sourceId) { // here only if not already drawn above - targetId := technicalAsset.Id - dotContent.WriteString("\n") - dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) + - ` [ color="#666666" style="dashed" ];`) - dotContent.WriteString("\n") - } - } - } - - dotContent.WriteString("}") - - // Write the DOT file - file, err := os.Create(diagramFilenameDOT) - checkErr(err) - defer file.Close() - _, err = fmt.Fprintln(file, dotContent.String()) - checkErr(err) - return file -} - -func writeDataFlowDiagramGraphvizDOT(diagramFilenameDOT string, dpi int) *os.File { - if *verbose { - fmt.Println("Writing data flow diagram input") - } - var dotContent strings.Builder - dotContent.WriteString("digraph generatedModel { concentrate=false \n") - - // Metadata init =============================================================================== - tweaks := "" - if model.ParsedModelRoot.DiagramTweakNodesep > 0 { - tweaks += "\n nodesep=\"" + strconv.Itoa(model.ParsedModelRoot.DiagramTweakNodesep) + "\"" - } - if model.ParsedModelRoot.DiagramTweakRanksep > 0 { - tweaks += "\n ranksep=\"" + strconv.Itoa(model.ParsedModelRoot.DiagramTweakRanksep) + "\"" - } - suppressBidirectionalArrows := true - splines := "ortho" - if len(model.ParsedModelRoot.DiagramTweakEdgeLayout) > 0 { - switch model.ParsedModelRoot.DiagramTweakEdgeLayout { - case "spline": - splines = "spline" - drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false - case "polyline": - splines = "polyline" - drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false - case "ortho": - splines = "ortho" - suppressBidirectionalArrows = true - case "curved": - splines = "curved" - drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false - case "false": - splines = "false" - drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false - default: - panic(errors.New("unknown value for diagram_tweak_suppress_edge_labels (spline, 
polyline, ortho, curved, false): " + - model.ParsedModelRoot.DiagramTweakEdgeLayout)) - } - } - rankdir := "TB" - if model.ParsedModelRoot.DiagramTweakLayoutLeftToRight { - rankdir = "LR" - } - modelTitle := "" - addModelTitle := false - if addModelTitle { - modelTitle = `label="` + model.ParsedModelRoot.Title + `"` - } - dotContent.WriteString(` graph [ ` + modelTitle + ` - labelloc=t - fontname="Verdana" - fontsize=40 - outputorder="nodesfirst" - dpi=` + strconv.Itoa(dpi) + ` - splines=` + splines + ` - rankdir="` + rankdir + `" -` + tweaks + ` - ]; - node [ - fontname="Verdana" - fontsize="20" - ]; - edge [ - shape="none" - fontname="Verdana" - fontsize="18" - ]; -`) - - // Trust Boundaries =============================================================================== - var subgraphSnippetsById = make(map[string]string) - // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order - // range over them in sorted (hence re-producible) way: - keys := make([]string, 0) - for k, _ := range model.ParsedModelRoot.TrustBoundaries { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - trustBoundary := model.ParsedModelRoot.TrustBoundaries[key] - var snippet strings.Builder - if len(trustBoundary.TechnicalAssetsInside) > 0 || len(trustBoundary.TrustBoundariesNested) > 0 { - if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks { - // see https://stackoverflow.com/questions/17247455/how-do-i-add-extra-space-between-clusters?noredirect=1&lq=1 - snippet.WriteString("\n subgraph cluster_space_boundary_for_layout_only_1" + hash(trustBoundary.Id) + " {\n") - snippet.WriteString(` graph [ - dpi=` + strconv.Itoa(dpi) + ` - label=<
> - fontsize="21" - style="invis" - color="green" - fontcolor="green" - margin="50.0" - penwidth="6.5" - outputorder="nodesfirst" - ];`) - } - snippet.WriteString("\n subgraph cluster_" + hash(trustBoundary.Id) + " {\n") - color, fontColor, bgColor, style, fontname := colors.RgbHexColorTwilight(), colors.RgbHexColorTwilight() /*"#550E0C"*/, "#FAFAFA", "dashed", "Verdana" - penwidth := 4.5 - if len(trustBoundary.TrustBoundariesNested) > 0 { - //color, fontColor, style, fontname = colors.Blue, colors.Blue, "dashed", "Verdana" - penwidth = 5.5 - } - if len(trustBoundary.ParentTrustBoundaryID()) > 0 { - bgColor = "#F1F1F1" - } - if trustBoundary.Type == model.NetworkPolicyNamespaceIsolation { - fontColor, bgColor = "#222222", "#DFF4FF" - } - if trustBoundary.Type == model.ExecutionEnvironment { - fontColor, bgColor, style = "#555555", "#FFFFF0", "dotted" - } - snippet.WriteString(` graph [ - dpi=` + strconv.Itoa(dpi) + ` - label=<
` + trustBoundary.Title + ` (` + trustBoundary.Type.String() + `)
> - fontsize="21" - style="` + style + `" - color="` + color + `" - bgcolor="` + bgColor + `" - fontcolor="` + fontColor + `" - fontname="` + fontname + `" - penwidth="` + fmt.Sprintf("%f", penwidth) + `" - forcelabels=true - outputorder="nodesfirst" - margin="50.0" - ];`) - snippet.WriteString("\n") - keys := trustBoundary.TechnicalAssetsInside - sort.Strings(keys) - for _, technicalAssetInside := range keys { - //log.Println("About to add technical asset link to trust boundary: ", technicalAssetInside) - technicalAsset := model.ParsedModelRoot.TechnicalAssets[technicalAssetInside] - snippet.WriteString(hash(technicalAsset.Id)) - snippet.WriteString(";\n") - } - keys = trustBoundary.TrustBoundariesNested - sort.Strings(keys) - for _, trustBoundaryNested := range keys { - //log.Println("About to add nested trust boundary to trust boundary: ", trustBoundaryNested) - trustBoundaryNested := model.ParsedModelRoot.TrustBoundaries[trustBoundaryNested] - snippet.WriteString("LINK-NEEDS-REPLACED-BY-cluster_" + hash(trustBoundaryNested.Id)) - snippet.WriteString(";\n") - } - snippet.WriteString(" }\n\n") - if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks { - snippet.WriteString(" }\n\n") - } - } - subgraphSnippetsById[hash(trustBoundary.Id)] = snippet.String() - } - // here replace links and remove from map after replacement (i.e. 
move snippet into nested) - for i, _ := range subgraphSnippetsById { - re := regexp.MustCompile(`LINK-NEEDS-REPLACED-BY-cluster_([0-9]*);`) - for { - matches := re.FindStringSubmatch(subgraphSnippetsById[i]) - if len(matches) > 0 { - embeddedSnippet := " //nested:" + subgraphSnippetsById[matches[1]] - subgraphSnippetsById[i] = strings.ReplaceAll(subgraphSnippetsById[i], matches[0], embeddedSnippet) - subgraphSnippetsById[matches[1]] = "" // to something like remove it - } else { - break - } - } - } - // now write them all - keys = make([]string, 0) - for k, _ := range subgraphSnippetsById { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - snippet := subgraphSnippetsById[key] - dotContent.WriteString(snippet) - } - - // Technical Assets =============================================================================== - // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order - // range over them in sorted (hence re-producible) way: - // Convert map to slice of values: - techAssets := []model.TechnicalAsset{} - for _, techAsset := range model.ParsedModelRoot.TechnicalAssets { - techAssets = append(techAssets, techAsset) - } - sort.Sort(model.ByOrderAndIdSort(techAssets)) - for _, technicalAsset := range techAssets { - dotContent.WriteString(makeTechAssetNode(technicalAsset, false)) - dotContent.WriteString("\n") - } - - // Data Flows (Technical Communication Links) =============================================================================== - for _, technicalAsset := range techAssets { - for _, dataFlow := range technicalAsset.CommunicationLinks { - sourceId := technicalAsset.Id - targetId := dataFlow.TargetId - //log.Println("About to add link from", sourceId, "to", targetId, "with id", dataFlow.Id) - var arrowStyle, arrowColor, readOrWriteHead, readOrWriteTail string - if dataFlow.Readonly { - readOrWriteHead = "empty" - readOrWriteTail = "odot" - 
} else { - readOrWriteHead = "normal" - readOrWriteTail = "dot" - } - dir := "forward" - if dataFlow.IsBidirectional() { - if !suppressBidirectionalArrows { // as it does not work as bug in grahviz with ortho: https://gitlab.com/graphviz/graphviz/issues/144 - dir = "both" - } - } - arrowStyle = ` style="` + dataFlow.DetermineArrowLineStyle() + `" penwidth="` + dataFlow.DetermineArrowPenWidth() + `" arrowtail="` + readOrWriteTail + `" arrowhead="` + readOrWriteHead + `" dir="` + dir + `" arrowsize="2.0" ` - arrowColor = ` color="` + dataFlow.DetermineArrowColor() + `"` - tweaks := "" - if dataFlow.DiagramTweakWeight > 0 { - tweaks += " weight=\"" + strconv.Itoa(dataFlow.DiagramTweakWeight) + "\" " - } - - dotContent.WriteString("\n") - dotContent.WriteString(" " + hash(sourceId) + " -> " + hash(targetId) + - ` [` + arrowColor + ` ` + arrowStyle + tweaks + ` constraint=` + strconv.FormatBool(dataFlow.DiagramTweakConstraint) + ` `) - if !model.ParsedModelRoot.DiagramTweakSuppressEdgeLabels { - dotContent.WriteString(` xlabel="` + encode(dataFlow.Protocol.String()) + `" fontcolor="` + dataFlow.DetermineLabelColor() + `" `) - } - dotContent.WriteString(" ];\n") - } - } - - dotContent.WriteString(makeDiagramInvisibleConnectionsTweaks()) - dotContent.WriteString(makeDiagramSameRankNodeTweaks()) - - dotContent.WriteString("}") - - //fmt.Println(dotContent.String()) - - // Write the DOT file - file, err := os.Create(diagramFilenameDOT) - checkErr(err) - defer file.Close() - _, err = fmt.Fprintln(file, dotContent.String()) - checkErr(err) - return file -} - -func makeDiagramInvisibleConnectionsTweaks() string { - // see https://stackoverflow.com/questions/2476575/how-to-control-node-placement-in-graphviz-i-e-avoid-edge-crossings - tweak := "" - if len(model.ParsedModelRoot.DiagramTweakInvisibleConnectionsBetweenAssets) > 0 { - for _, invisibleConnections := range model.ParsedModelRoot.DiagramTweakInvisibleConnectionsBetweenAssets { - assetIDs := 
strings.Split(invisibleConnections, ":") - if len(assetIDs) == 2 { - checkTechnicalAssetExists(assetIDs[0], "diagram tweak connections", true) - checkTechnicalAssetExists(assetIDs[1], "diagram tweak connections", true) - tweak += "\n" + hash(assetIDs[0]) + " -> " + hash(assetIDs[1]) + " [style=invis]; \n" - } - } - } - return tweak -} - -func makeDiagramSameRankNodeTweaks() string { - // see https://stackoverflow.com/questions/25734244/how-do-i-place-nodes-on-the-same-level-in-dot - tweak := "" - if len(model.ParsedModelRoot.DiagramTweakSameRankAssets) > 0 { - for _, sameRank := range model.ParsedModelRoot.DiagramTweakSameRankAssets { - assetIDs := strings.Split(sameRank, ":") - if len(assetIDs) > 0 { - tweak += "{ rank=same; " - for _, id := range assetIDs { - checkTechnicalAssetExists(id, "diagram tweak same-rank", true) - if len(model.ParsedModelRoot.TechnicalAssets[id].GetTrustBoundaryId()) > 0 { - panic(errors.New("technical assets (referenced in same rank diagram tweak) are inside trust boundaries: " + - fmt.Sprintf("%v", model.ParsedModelRoot.DiagramTweakSameRankAssets))) - } - tweak += " " + hash(id) + "; " - } - tweak += " }" - } - } - } - return tweak -} - -func makeTechAssetNode(technicalAsset model.TechnicalAsset, simplified bool) string { - if simplified { - color := colors.RgbHexColorOutOfScope() - if !technicalAsset.OutOfScope { - risks := technicalAsset.GeneratedRisks() - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - color = colors.RgbHexColorCriticalRisk() - case model.HighSeverity: - color = colors.RgbHexColorHighRisk() - case model.ElevatedSeverity: - color = colors.RgbHexColorElevatedRisk() - case model.MediumSeverity: - color = colors.RgbHexColorMediumRisk() - case model.LowSeverity: - color = colors.RgbHexColorLowRisk() - default: - color = "#444444" // since black is too dark here as fill color - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - color = "#444444" // since black is too dark here as 
fill color - } - } - return " " + hash(technicalAsset.Id) + ` [ shape="box" style="filled" fillcolor="` + color + `" - label=<` + encode(technicalAsset.Title) + `> penwidth="3.0" color="` + color + `" ]; - ` - } else { - var shape, title string - var lineBreak = "" - switch technicalAsset.Type { - case model.ExternalEntity: - shape = "box" - title = technicalAsset.Title - case model.Process: - shape = "ellipse" - title = technicalAsset.Title - case model.Datastore: - shape = "cylinder" - title = technicalAsset.Title - if technicalAsset.Redundant { - lineBreak = "
" - } - } - - if technicalAsset.UsedAsClientByHuman { - shape = "octagon" - } - - // RAA = Relative Attacker Attractiveness - raa := technicalAsset.RAA - var attackerAttractivenessLabel string - if technicalAsset.OutOfScope { - attackerAttractivenessLabel = "RAA: out of scope" - } else { - attackerAttractivenessLabel = "RAA: " + fmt.Sprintf("%.0f", raa) + " %" - } - - compartmentBorder := "0" - if technicalAsset.MultiTenant { - compartmentBorder = "1" - } - - return " " + hash(technicalAsset.Id) + ` [ - label=<
` + lineBreak + technicalAsset.Technology.String() + `
` + technicalAsset.Size.String() + `
` + encode(title) + `
` + attackerAttractivenessLabel + `
> - shape=` + shape + ` style="` + technicalAsset.DetermineShapeBorderLineStyle() + `,` + technicalAsset.DetermineShapeStyle() + `" penwidth="` + technicalAsset.DetermineShapeBorderPenWidth() + `" fillcolor="` + technicalAsset.DetermineShapeFillColor() + `" - peripheries=` + strconv.Itoa(technicalAsset.DetermineShapePeripheries()) + ` - color="` + technicalAsset.DetermineShapeBorderColor() + "\"\n ]; " - } -} - -func makeDataAssetNode(dataAsset model.DataAsset) string { - var color string - switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() { - case model.Probable: - color = colors.RgbHexColorHighRisk() - case model.Possible: - color = colors.RgbHexColorMediumRisk() - case model.Improbable: - color = colors.RgbHexColorLowRisk() - default: - color = "#444444" // since black is too dark here as fill color - } - if !dataAsset.IsDataBreachPotentialStillAtRisk() { - color = "#444444" // since black is too dark here as fill color - } - return " " + hash(dataAsset.Id) + ` [ label=<` + encode(dataAsset.Title) + `> penwidth="3.0" style="filled" fillcolor="` + color + `" color="` + color + "\"\n ]; " -} - -func encode(value string) string { - return strings.ReplaceAll(value, "&", "&") -} - -func renderDataFlowDiagramGraphvizImage(dotFile *os.File, targetDir string) { - if *verbose { - fmt.Println("Rendering data flow diagram input") - } - // tmp files - tmpFileDOT, err := ioutil.TempFile(model.TempFolder, "diagram-*-.gv") - checkErr(err) - defer os.Remove(tmpFileDOT.Name()) - - tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png") - checkErr(err) - defer os.Remove(tmpFilePNG.Name()) - - // copy into tmp file as input - input, err := ioutil.ReadFile(dotFile.Name()) - if err != nil { - fmt.Println(err) - return - } - err = ioutil.WriteFile(tmpFileDOT.Name(), input, 0644) - if err != nil { - fmt.Println("Error creating", tmpFileDOT.Name()) - fmt.Println(err) - return - } - - // exec - cmd := exec.Command(graphvizDataFlowDiagramConversionCall, 
tmpFileDOT.Name(), tmpFilePNG.Name()) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - panic(errors.New("graph rendering call failed with error:" + err.Error())) - } - // copy into resulting file - input, err = ioutil.ReadFile(tmpFilePNG.Name()) - if err != nil { - fmt.Println(err) - return - } - err = ioutil.WriteFile(targetDir+"/"+dataFlowDiagramFilenamePNG, input, 0644) - if err != nil { - fmt.Println("Error creating", dataFlowDiagramFilenamePNG) - fmt.Println(err) - return - } -} - -func renderDataAssetDiagramGraphvizImage(dotFile *os.File, targetDir string) { // TODO dedupe with other render...() method here - if *verbose { - fmt.Println("Rendering data asset diagram input") - } - // tmp files - tmpFileDOT, err := ioutil.TempFile(model.TempFolder, "diagram-*-.gv") - checkErr(err) - defer os.Remove(tmpFileDOT.Name()) - - tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png") - checkErr(err) - defer os.Remove(tmpFilePNG.Name()) - - // copy into tmp file as input - input, err := ioutil.ReadFile(dotFile.Name()) - if err != nil { - fmt.Println(err) - return - } - err = ioutil.WriteFile(tmpFileDOT.Name(), input, 0644) - if err != nil { - fmt.Println("Error creating", tmpFileDOT.Name()) - fmt.Println(err) - return - } - - // exec - cmd := exec.Command(graphvizDataAssetDiagramConversionCall, tmpFileDOT.Name(), tmpFilePNG.Name()) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - panic(errors.New("graph rendering call failed with error: " + err.Error())) - } - // copy into resulting file - input, err = ioutil.ReadFile(tmpFilePNG.Name()) - if err != nil { - fmt.Println(err) - return - } - err = ioutil.WriteFile(targetDir+"/"+dataAssetDiagramFilenamePNG, input, 0644) - if err != nil { - fmt.Println("Error creating", dataAssetDiagramFilenamePNG) - fmt.Println(err) - return - } -} diff --git a/model/types.go b/model/types.go deleted file mode 100644 index d22c16db..00000000 --- 
a/model/types.go +++ /dev/null @@ -1,4089 +0,0 @@ -package model - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/threagile/threagile/colors" - "regexp" - "sort" - "strings" - "time" -) - -const ThreagileVersion = "1.0.0" // Also update into example and stub model files and openapi.yaml -const TempFolder = "/dev/shm" // TODO: make configurable via cmdline arg? - -var ParsedModelRoot ParsedModel - -var CommunicationLinks map[string]CommunicationLink // TODO as part of "ParsedModelRoot"? -var IncomingTechnicalCommunicationLinksMappedByTargetId map[string][]CommunicationLink -var DirectContainingTrustBoundaryMappedByTechnicalAssetId map[string]TrustBoundary -var DirectContainingSharedRuntimeMappedByTechnicalAssetId map[string]SharedRuntime - -var GeneratedRisksByCategory map[RiskCategory][]Risk -var GeneratedRisksBySyntheticId map[string]Risk - -var AllSupportedTags map[string]bool - -func Init() { - CommunicationLinks = make(map[string]CommunicationLink, 0) - IncomingTechnicalCommunicationLinksMappedByTargetId = make(map[string][]CommunicationLink, 0) - DirectContainingTrustBoundaryMappedByTechnicalAssetId = make(map[string]TrustBoundary, 0) - DirectContainingSharedRuntimeMappedByTechnicalAssetId = make(map[string]SharedRuntime, 0) - GeneratedRisksByCategory = make(map[RiskCategory][]Risk, 0) - GeneratedRisksBySyntheticId = make(map[string]Risk, 0) - AllSupportedTags = make(map[string]bool, 0) -} - -func AddToListOfSupportedTags(tags []string) { - for _, tag := range tags { - AllSupportedTags[tag] = true - } -} - -type CustomRiskRule interface { - Category() RiskCategory - SupportedTags() []string - GenerateRisks() []Risk -} - -// === To be used by model macros etc. 
======================= - -func AddTagToModelInput(modelInput *ModelInput, tag string, dryRun bool, changes *[]string) { - tag = NormalizeTag(tag) - if !Contains(modelInput.Tags_available, tag) { - *changes = append(*changes, "adding tag: "+tag) - if !dryRun { - modelInput.Tags_available = append(modelInput.Tags_available, tag) - } - } -} - -func NormalizeTag(tag string) string { - return strings.TrimSpace(strings.ToLower(tag)) -} - -func MakeID(val string) string { - reg, _ := regexp.Compile("[^A-Za-z0-9]+") - return strings.Trim(reg.ReplaceAllString(strings.ToLower(val), "-"), "- ") -} - -// === Model Type Stuff ====================================== - -type ModelInput struct { // TODO: Eventually remove this and directly use ParsedModelRoot? But then the error messages for model errors are not quite as good anymore... - Threagile_version string - Title string - Author Author - Date string - Business_overview Overview - Technical_overview Overview - Business_criticality string - Management_summary_comment string - Questions map[string]string - Abuse_cases map[string]string - Security_requirements map[string]string - Tags_available []string - Data_assets map[string]InputDataAsset - Technical_assets map[string]InputTechnicalAsset - Trust_boundaries map[string]InputTrustBoundary - Shared_runtimes map[string]InputSharedRuntime - Individual_risk_categories map[string]InputIndividualRiskCategory - Risk_tracking map[string]InputRiskTracking - Diagram_tweak_nodesep, Diagram_tweak_ranksep int - Diagram_tweak_edge_layout string - Diagram_tweak_suppress_edge_labels bool - Diagram_tweak_layout_left_to_right bool - Diagram_tweak_invisible_connections_between_assets []string - Diagram_tweak_same_rank_assets []string -} - -type InputDataAsset struct { - ID string `json:"id"` - Description string `json:"description"` - Usage string `json:"usage"` - Tags []string `json:"tags"` - Origin string `json:"origin"` - Owner string `json:"owner"` - Quantity string `json:"quantity"` - 
Confidentiality string `json:"confidentiality"` - Integrity string `json:"integrity"` - Availability string `json:"availability"` - Justification_cia_rating string `json:"justification_cia_rating"` -} - -type InputTechnicalAsset struct { - ID string `json:"id"` - Description string `json:"description"` - Type string `json:"type"` - Usage string `json:"usage"` - Used_as_client_by_human bool `json:"used_as_client_by_human"` - Out_of_scope bool `json:"out_of_scope"` - Justification_out_of_scope string `json:"justification_out_of_scope"` - Size string `json:"size"` - Technology string `json:"technology"` - Tags []string `json:"tags"` - Internet bool `json:"internet"` - Machine string `json:"machine"` - Encryption string `json:"encryption"` - Owner string `json:"owner"` - Confidentiality string `json:"confidentiality"` - Integrity string `json:"integrity"` - Availability string `json:"availability"` - Justification_cia_rating string `json:"justification_cia_rating"` - Multi_tenant bool `json:"multi_tenant"` - Redundant bool `json:"redundant"` - Custom_developed_parts bool `json:"custom_developed_parts"` - Data_assets_processed []string `json:"data_assets_processed"` - Data_assets_stored []string `json:"data_assets_stored"` - Data_formats_accepted []string `json:"data_formats_accepted"` - Diagram_tweak_order int `json:"diagram_tweak_order"` - Communication_links map[string]InputCommunicationLink `json:"communication_links"` -} - -type InputCommunicationLink struct { - Target string `json:"target"` - Description string `json:"description"` - Protocol string `json:"protocol"` - Authentication string `json:"authentication"` - Authorization string `json:"authorization"` - Tags []string `json:"tags"` - VPN bool `json:"vpn"` - IP_filtered bool `json:"ip_filtered"` - Readonly bool `json:"readonly"` - Usage string `json:"usage"` - Data_assets_sent []string `json:"data_assets_sent"` - Data_assets_received []string `json:"data_assets_received"` - Diagram_tweak_weight int 
`json:"diagram_tweak_weight"` - Diagram_tweak_constraint bool `json:"diagram_tweak_constraint"` -} - -type InputSharedRuntime struct { - ID string `json:"id"` - Description string `json:"description"` - Tags []string `json:"tags"` - Technical_assets_running []string `json:"technical_assets_running"` -} - -type InputTrustBoundary struct { - ID string `json:"id"` - Description string `json:"description"` - Type string `json:"type"` - Tags []string `json:"tags"` - Technical_assets_inside []string `json:"technical_assets_inside"` - Trust_boundaries_nested []string `json:"trust_boundaries_nested"` -} - -type InputIndividualRiskCategory struct { - ID string `json:"id"` - Description string `json:"description"` - Impact string `json:"impact"` - ASVS string `json:"asvs"` - Cheat_sheet string `json:"cheat_sheet"` - Action string `json:"action"` - Mitigation string `json:"mitigation"` - Check string `json:"check"` - Function string `json:"function"` - STRIDE string `json:"stride"` - Detection_logic string `json:"detection_logic"` - Risk_assessment string `json:"risk_assessment"` - False_positives string `json:"false_positives"` - Model_failure_possible_reason bool `json:"model_failure_possible_reason"` - CWE int `json:"cwe"` - Risks_identified map[string]InputRiskIdentified `json:"risks_identified"` -} - -type InputRiskIdentified struct { - Severity string `json:"severity"` - Exploitation_likelihood string `json:"exploitation_likelihood"` - Exploitation_impact string `json:"exploitation_impact"` - Data_breach_probability string `json:"data_breach_probability"` - Data_breach_technical_assets []string `json:"data_breach_technical_assets"` - Most_relevant_data_asset string `json:"most_relevant_data_asset"` - Most_relevant_technical_asset string `json:"most_relevant_technical_asset"` - Most_relevant_communication_link string `json:"most_relevant_communication_link"` - Most_relevant_trust_boundary string `json:"most_relevant_trust_boundary"` - Most_relevant_shared_runtime string 
// NOTE(review): the next two lines are the tail of a struct that begins before
// this chunk (a field tag plus the closing brace) — kept verbatim.
`json:"most_relevant_shared_runtime"`
}

// InputRiskTracking is the raw (all string-typed) input form of a risk-tracking
// entry as read from the model file: status, justification, ticket, date, and
// who checked it. Parsing/validation of these strings happens elsewhere.
type InputRiskTracking struct {
    Status        string `json:"status"`
    Justification string `json:"justification"`
    Ticket        string `json:"ticket"`
    Date          string `json:"date"`
    Checked_by    string `json:"checked_by"`
}

// TypeDescription contains a name for a type and its description
type TypeDescription struct {
    Name        string
    Description string
}

// TypeEnum is implemented by all enum-like types in this file: a
// machine-readable name (String) plus a human-readable explanation (Explain).
type TypeEnum interface {
    String() string
    Explain() string
}

// Quantity models how much data is handled; values are ordered ascending, and
// the int value doubles as the index into QuantityTypeDescription.
type Quantity int

const (
    VeryFew Quantity = iota
    Few
    Many
    VeryMany
)

// QuantityValues lists all Quantity values in declaration order.
func QuantityValues() []TypeEnum {
    return []TypeEnum{
        VeryFew,
        Few,
        Many,
        VeryMany,
    }
}

// ParseQuantity maps the (trimmed) string form back to a Quantity value;
// it returns an error when the value matches no known name.
func ParseQuantity(value string) (quantity Quantity, err error) {
    value = strings.TrimSpace(value)
    for _, candidate := range QuantityValues() {
        if candidate.String() == value {
            // err is still nil here (named return), so this is (candidate, nil).
            return candidate.(Quantity), err
        }
    }
    return quantity, errors.New("Unable to parse into type: " + value)
}

// QuantityTypeDescription is indexed by the Quantity value itself.
var QuantityTypeDescription = [...]TypeDescription{
    {"very-few", "Very few"},
    {"few", "Few"},
    {"many", "Many"},
    {"very-many", "Very many"},
}

func (what Quantity) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return QuantityTypeDescription[what].Name
}

func (what Quantity) Explain() string {
    return QuantityTypeDescription[what].Description
}

// Title returns the human-readable (lowercase) title of the quantity.
func (what Quantity) Title() string {
    return [...]string{"very few", "few", "many", "very many"}[what]
}

// QuantityFactor returns the numeric weight of this quantity for
// risk/attractiveness calculations.
func (what Quantity) QuantityFactor() float64 {
    // fibonacci starting at 1
    return [...]float64{1, 2, 3, 5}[what]
}

// Confidentiality classifies data secrecy, ordered ascending from Public to
// StrictlyConfidential.
type Confidentiality int

const (
    Public Confidentiality = iota
    Internal
    Restricted
    Confidential
    StrictlyConfidential
)

// ConfidentialityValues lists all Confidentiality values in declaration order.
func ConfidentialityValues() []TypeEnum {
    return []TypeEnum{
        Public,
        Internal,
        Restricted,
        Confidential,
        StrictlyConfidential,
    }
}

// ParseConfidentiality maps the (trimmed) string form back to a
// Confidentiality value; it returns an error on unknown input.
func ParseConfidentiality(value string) (confidentiality Confidentiality, err error) {
    value = strings.TrimSpace(value)
    for _, candidate := range ConfidentialityValues() {
        if candidate.String() == value {
            return candidate.(Confidentiality), err
        }
    }
    return confidentiality, errors.New("Unable to parse into type: " + value)
}

// ConfidentialityTypeDescription is indexed by the Confidentiality value.
var ConfidentialityTypeDescription = [...]TypeDescription{
    {"public", "Public available information"},
    {"internal", "(Company) internal information - but all people in the institution can access it"},
    {"restricted", "Internal and with restricted access"},
    {"confidential", "Only a few selected people have access"},
    {"strictly-confidential", "Highest secrecy level"},
}

func (what Confidentiality) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return ConfidentialityTypeDescription[what].Name
}

func (what Confidentiality) Explain() string {
    return ConfidentialityTypeDescription[what].Description
}

// The three attractiveness weights below scale with confidentiality; each uses
// a Fibonacci sequence with a different starting point depending on whether
// the rating applies to the asset itself, stored/processed data, or data in
// transit.
func (what Confidentiality) AttackerAttractivenessForAsset() float64 {
    // fibonacci starting at 8
    return [...]float64{8, 13, 21, 34, 55}[what]
}
func (what Confidentiality) AttackerAttractivenessForProcessedOrStoredData() float64 {
    // fibonacci starting at 5
    return [...]float64{5, 8, 13, 21, 34}[what]
}
func (what Confidentiality) AttackerAttractivenessForInOutTransferredData() float64 {
    // fibonacci starting at 2
    return [...]float64{2, 3, 5, 8, 13}[what]
}

// RatingStringInScale renders the value as a "(rated N in scale of 5)" label
// for report text; exactly one of the if-branches matches.
func (what Confidentiality) RatingStringInScale() string {
    result := "(rated "
    if what == Public {
        result += "1"
    }
    if what == Internal {
        result += "2"
    }
    if what == Restricted {
        result += "3"
    }
    if what == Confidential {
        result += "4"
    }
    if what == StrictlyConfidential {
        result += "5"
    }
    result += " in scale of 5)"
    return result
}

// Criticality classifies integrity/availability importance, ordered ascending
// from Archive to MissionCritical.
type Criticality int

const (
    Archive Criticality = iota
    Operational
    Important
    Critical
    MissionCritical
)

// CriticalityValues lists all Criticality values in declaration order.
func CriticalityValues() []TypeEnum {
    return []TypeEnum{
        Archive,
        Operational,
        Important,
        Critical,
        MissionCritical,
    }
}

// ParseCriticality maps the (trimmed) string form back to a Criticality value;
// it returns an error on unknown input.
func ParseCriticality(value string) (criticality Criticality, err error) {
    value = strings.TrimSpace(value)
    for _, candidate := range CriticalityValues() {
        if candidate.String() == value {
            return candidate.(Criticality), err
        }
    }
    return criticality, errors.New("Unable to parse into type: " + value)
}

// CriticalityTypeDescription is indexed by the Criticality value.
var CriticalityTypeDescription = [...]TypeDescription{
    {"archive", "Stored, not active"},
    {"operational", "If this fails, people will just have an ad-hoc coffee break until it is back"},
    {"important", "Issues here results in angry people"},
    {"critical", "Failure is really expensive or crippling"},
    {"mission-critical", "This must not fail"},
}

func (what Criticality) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return CriticalityTypeDescription[what].Name
}

func (what Criticality) Explain() string {
    return CriticalityTypeDescription[what].Description
}

// Attractiveness weights analogous to the Confidentiality ones above, with
// lower Fibonacci starting points.
func (what Criticality) AttackerAttractivenessForAsset() float64 {
    // fibonacci starting at 5
    return [...]float64{5, 8, 13, 21, 34}[what]
}
func (what Criticality) AttackerAttractivenessForProcessedOrStoredData() float64 {
    // fibonacci starting at 3
    return [...]float64{3, 5, 8, 13, 21}[what]
}
func (what Criticality) AttackerAttractivenessForInOutTransferredData() float64 {
    // fibonacci starting at 2
    return [...]float64{2, 3, 5, 8, 13}[what]
}

// RatingStringInScale renders the value as a "(rated N in scale of 5)" label.
func (what Criticality) RatingStringInScale() string {
    result := "(rated "
    if what == Archive {
        result += "1"
    }
    if what == Operational {
        result += "2"
    }
    if what == Important {
        result += "3"
    }
    if what == Critical {
        result += "4"
    }
    if what == MissionCritical {
        result += "5"
    }
    result += " in scale of 5)"
    return result
}

// TechnicalAssetType is the coarse kind of a technical asset.
type TechnicalAssetType int

const (
    ExternalEntity TechnicalAssetType = iota
    Process
    Datastore
)

// TechnicalAssetTypeValues lists all TechnicalAssetType values.
// NOTE(review): unlike the enums above, this one has no Parse function in
// this chunk — presumably parsed elsewhere; confirm before relying on it.
func TechnicalAssetTypeValues() []TypeEnum {
    return []TypeEnum{
        ExternalEntity,
        Process,
        Datastore,
    }
}

var TechnicalAssetTypeDescription = [...]TypeDescription{
    {"external-entity", "This asset is hosted and managed by a third party"},
    {"process", "A software process"},
    {"datastore", "This asset stores data"},
}

func (what TechnicalAssetType) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return TechnicalAssetTypeDescription[what].Name
}

func (what TechnicalAssetType) Explain() string {
    return TechnicalAssetTypeDescription[what].Description
}

// TechnicalAssetSize is the granularity of a technical asset, from whole
// system down to a single component.
type TechnicalAssetSize int

const (
    System TechnicalAssetSize = iota
    Service
    Application
    Component
)

// TechnicalAssetSizeValues lists all TechnicalAssetSize values.
func TechnicalAssetSizeValues() []TypeEnum {
    return []TypeEnum{
        System,
        Service,
        Application,
        Component,
    }
}

var TechnicalAssetSizeDescription = [...]TypeDescription{
    {"system", "A system consists of several services"},
    {"service", "A specific service (web, mail, ...)"},
    {"application", "A single application"},
    {"component", "A component of an application (smaller unit like a microservice)"},
}

func (what TechnicalAssetSize) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return TechnicalAssetSizeDescription[what].Name
}

func (what TechnicalAssetSize) Explain() string {
    return TechnicalAssetSizeDescription[what].Description
}

// Authorization describes how a communication link is authorized.
type Authorization int

const (
    NoneAuthorization Authorization = iota
    TechnicalUser
    EnduserIdentityPropagation
)

// AuthorizationValues lists all Authorization values.
func AuthorizationValues() []TypeEnum {
    return []TypeEnum{
        NoneAuthorization,
        TechnicalUser,
        EnduserIdentityPropagation,
    }
}

var AuthorizationTypeDescription = [...]TypeDescription{
    {"none", "No authorization"},
    {"technical-user", "Technical user (service-to-service) like DB user credentials"},
    {"enduser-identity-propagation", "Identity of end user propagates to this service"},
}

func (what Authorization) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return AuthorizationTypeDescription[what].Name
}

func (what Authorization) Explain() string {
    return AuthorizationTypeDescription[what].Description
}

// Authentication describes how a communication link authenticates.
type Authentication int

const (
    NoneAuthentication Authentication = iota
    Credentials
    SessionId
    Token
    ClientCertificate
    TwoFactor
    Externalized
)

// AuthenticationValues lists all Authentication values.
func AuthenticationValues() []TypeEnum {
    return []TypeEnum{
        NoneAuthentication,
        Credentials,
        SessionId,
        Token,
        ClientCertificate,
        TwoFactor,
        Externalized,
    }
}

var AuthenticationTypeDescription = [...]TypeDescription{
    {"none", "No authentication"},
    {"credentials", "Username and password, pin or passphrase"},
    {"session-id", "A server generated session id with limited life span"},
    {"token", "A server generated token. Containing session id, other data and is cryptographically signed"},
    {"client-certificate", "A certificate file stored on the client identifying this specific client"},
    {"two-factor", "Credentials plus another factor like a physical object (card) or biometrics"},
    {"externalized", "Some external company handles authentication"},
}

func (what Authentication) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    //return [...]string{"none", "credentials", "session-id", "token", "client-certificate", "two-factor", "externalized"}[what]
    return AuthenticationTypeDescription[what].Name
}

func (what Authentication) Explain() string {
    return AuthenticationTypeDescription[what].Description
}

// Usage distinguishes business-facing systems from DevOps tooling.
type Usage int

const (
    Business Usage = iota
    DevOps
)

// UsageValues lists all Usage values.
func UsageValues() []TypeEnum {
    return []TypeEnum{
        Business,
        DevOps,
    }
}

// ParseUsage maps the (trimmed) string form back to a Usage value; it returns
// an error on unknown input.
func ParseUsage(value string) (usage Usage, err error) {
    value = strings.TrimSpace(value)
    for _, candidate := range UsageValues() {
        if candidate.String() == value {
            return candidate.(Usage), err
        }
    }
    return usage, errors.New("Unable to parse into type: " + value)
}

var UsageTypeDescription = [...]TypeDescription{
    {"business", "This system is operational and does business tasks"},
    {"devops", "This system is for development and/or deployment or other operational tasks"},
}

func (what Usage) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    //return [...]string{"business", "devops"}[what]
    return UsageTypeDescription[what].Name
}

func (what Usage) Explain() string {
    return UsageTypeDescription[what].Description
}

// Title returns the human-readable (capitalized) title of the usage.
func (what Usage) Title() string {
    return [...]string{"Business", "DevOps"}[what]
}

// EncryptionStyle describes how (and whether) data at rest is encrypted.
type EncryptionStyle int

const (
    NoneEncryption EncryptionStyle = iota
    Transparent
    DataWithSymmetricSharedKey
    DataWithAsymmetricSharedKey
    DataWithEnduserIndividualKey
)

// EncryptionStyleValues lists all EncryptionStyle values.
func EncryptionStyleValues() []TypeEnum {
    return []TypeEnum{
        NoneEncryption,
        Transparent,
        DataWithSymmetricSharedKey,
        DataWithAsymmetricSharedKey,
        DataWithEnduserIndividualKey,
    }
}

// ParseEncryptionStyle maps the (trimmed) string form back to an
// EncryptionStyle value; it returns an error on unknown input.
func ParseEncryptionStyle(value string) (encryptionStyle EncryptionStyle, err error) {
    value = strings.TrimSpace(value)
    for _, candidate := range EncryptionStyleValues() {
        if candidate.String() == value {
            return candidate.(EncryptionStyle), err
        }
    }
    return encryptionStyle, errors.New("Unable to parse into type: " + value)
}

var EncryptionStyleTypeDescription = [...]TypeDescription{
    {"none", "No encryption"},
    {"transparent", "Encrypted data at rest"},
    {"data-with-symmetric-shared-key", "Both communication partners have the same key. This must be kept secret"},
    {"data-with-asymmetric-shared-key", "The key is split into public and private. Those two are shared between partners"},
    {"data-with-enduser-individual-key", "The key is (managed) by the end user"},
}

func (what EncryptionStyle) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return EncryptionStyleTypeDescription[what].Name
}

func (what EncryptionStyle) Explain() string {
    return EncryptionStyleTypeDescription[what].Description
}

// Title returns the human-readable title of the encryption style.
func (what EncryptionStyle) Title() string {
    return [...]string{"None", "Transparent", "Data with Symmetric Shared Key", "Data with Asymmetric Shared Key", "Data with Enduser Individual Key"}[what]
}

// DataFormat describes the wire/storage format a technical asset accepts.
type DataFormat int

const (
    JSON DataFormat = iota
    XML
    Serialization
    File
    CSV
)

// DataFormatValues lists all DataFormat values.
func DataFormatValues() []TypeEnum {
    return []TypeEnum{
        JSON,
        XML,
        Serialization,
        File,
        CSV,
    }
}

var DataFormatTypeDescription = [...]TypeDescription{
    {"json", "JSON"},
    {"xml", "XML"},
    {"serialization", "Serialized program objects"},
    {"file", "Specific file types for data"},
    {"csv", "CSV"},
}

func (what DataFormat) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return DataFormatTypeDescription[what].Name
}

func (what DataFormat) Explain() string {
    return DataFormatTypeDescription[what].Description
}

// Title returns the short human-readable title of the data format.
func (what DataFormat) Title() string {
    return [...]string{"JSON", "XML", "Serialization", "File", "CSV"}[what]
}

// Description returns a longer human-readable description of the data format.
func (what DataFormat) Description() string {
    return [...]string{"JSON marshalled object data", "XML structured data", "Serialization-based object graphs",
        "File input/uploads", "CSV tabular data"}[what]
}

// Protocol enumerates the communication protocols a link can use. The int
// value doubles as the index into ProtocolTypeDescription.
type Protocol int

const (
    UnknownProtocol Protocol = iota
    HTTP
    HTTPS
    WS
    WSS
    Reverse_proxy_web_protocol
    Reverse_proxy_web_protocol_encrypted
    MQTT
    JDBC
    JDBC_encrypted
    ODBC
    ODBC_encrypted
    SQL_access_protocol
    SQL_access_protocol_encrypted
    NoSQL_access_protocol
    NoSQL_access_protocol_encrypted
    BINARY
    BINARY_encrypted
    TEXT
    TEXT_encrypted
    SSH
    SSH_tunnel
    SMTP
    SMTP_encrypted
    POP3
    POP3_encrypted
    IMAP
    IMAP_encrypted
    FTP
    FTPS
    SFTP
    SCP
    LDAP
    LDAPS
    JMS
    NFS
    SMB
    SMB_encrypted
    LocalFileAccess
    NRPE
    XMPP
    IIOP
    IIOP_encrypted
    JRMP
    JRMP_encrypted
    InProcessLibraryCall
    ContainerSpawning
)

// ProtocolValues lists all Protocol values in declaration order.
func ProtocolValues() []TypeEnum {
    return []TypeEnum{
        UnknownProtocol,
        HTTP,
        HTTPS,
        WS,
        WSS,
        Reverse_proxy_web_protocol,
        Reverse_proxy_web_protocol_encrypted,
        MQTT,
        JDBC,
        JDBC_encrypted,
        ODBC,
        ODBC_encrypted,
        SQL_access_protocol,
        SQL_access_protocol_encrypted,
        NoSQL_access_protocol,
        NoSQL_access_protocol_encrypted,
        BINARY,
        BINARY_encrypted,
        TEXT,
        TEXT_encrypted,
        SSH,
        SSH_tunnel,
        SMTP,
        SMTP_encrypted,
        POP3,
        POP3_encrypted,
        IMAP,
        IMAP_encrypted,
        FTP,
        FTPS,
        SFTP,
        SCP,
        LDAP,
        LDAPS,
        JMS,
        NFS,
        SMB,
        SMB_encrypted,
        LocalFileAccess,
        NRPE,
        XMPP,
        IIOP,
        IIOP_encrypted,
        JRMP,
        JRMP_encrypted,
        InProcessLibraryCall,
        ContainerSpawning,
    }
}

// ProtocolTypeDescription is indexed by the Protocol value; order must match
// the const block above.
var ProtocolTypeDescription = [...]TypeDescription{
    {"unknown-protocol", "Unknown protocol"},
    {"http", "HTTP protocol"},
    {"https", "HTTPS protocol (encrypted)"},
    {"ws", "WebSocket"},
    {"wss", "WebSocket but encrypted"},
    {"reverse-proxy-web-protocol", "Protocols used by reverse proxies"},
    {"reverse-proxy-web-protocol-encrypted", "Protocols used by reverse proxies but encrypted"},
    {"mqtt", "MQTT Message protocol. Encryption via TLS is optional"},
    {"jdbc", "Java Database Connectivity"},
    {"jdbc-encrypted", "Java Database Connectivity but encrypted"},
    {"odbc", "Open Database Connectivity"},
    {"odbc-encrypted", "Open Database Connectivity but encrypted"},
    {"sql-access-protocol", "SQL access protocol"},
    {"sql-access-protocol-encrypted", "SQL access protocol but encrypted"},
    {"nosql-access-protocol", "NOSQL access protocol"},
    {"nosql-access-protocol-encrypted", "NOSQL access protocol but encrypted"},
    {"binary", "Some other binary protocol"},
    {"binary-encrypted", "Some other binary protocol, encrypted"},
    {"text", "Some other text protocol"},
    {"text-encrypted", "Some other text protocol, encrypted"},
    {"ssh", "Secure Shell to execute commands"},
    {"ssh-tunnel", "Secure Shell as a tunnel"},
    {"smtp", "Mail transfer protocol (sending)"},
    {"smtp-encrypted", "Mail transfer protocol (sending), encrypted"},
    {"pop3", "POP 3 mail fetching"},
    {"pop3-encrypted", "POP 3 mail fetching, encrypted"},
    {"imap", "IMAP mail sync protocol"},
    {"imap-encrypted", "IMAP mail sync protocol, encrypted"},
    {"ftp", "File Transfer Protocol"},
    {"ftps", "FTP with TLS"},
    {"sftp", "FTP on SSH"},
    {"scp", "Secure Shell to copy files"},
    {"ldap", "Lightweight Directory Access Protocol - User directories"},
    {"ldaps", "Lightweight Directory Access Protocol - User directories on TLS"},
    {"jms", "Jakarta Messaging"},
    {"nfs", "Network File System"},
    {"smb", "Server Message Block"},
    {"smb-encrypted", "Server Message Block, but encrypted"},
    {"local-file-access", "Data files are on the local system"},
    {"nrpe", "Nagios Remote Plugin Executor"},
    {"xmpp", "Extensible Messaging and Presence Protocol"},
    {"iiop", "Internet Inter-ORB Protocol "},
    {"iiop-encrypted", "Internet Inter-ORB Protocol , encrypted"},
    {"jrmp", "Java Remote Method Protocol"},
    {"jrmp-encrypted", "Java Remote Method Protocol, encrypted"},
    {"in-process-library-call", "Call to local library"},
    {"container-spawning", "Spawn a container"},
}

func (what Protocol) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return ProtocolTypeDescription[what].Name
}

func (what Protocol) Explain() string {
    return ProtocolTypeDescription[what].Description
}

// IsProcessLocal reports whether the protocol never leaves the local machine
// (library call, local file access, container spawning).
func (what Protocol) IsProcessLocal() bool {
    return what == InProcessLibraryCall || what == LocalFileAccess || what == ContainerSpawning
}

// IsEncrypted reports whether the protocol provides transport encryption.
// Note that FTP/LDAP/SMB/etc. plain variants are deliberately absent.
func (what Protocol) IsEncrypted() bool {
    return what == HTTPS || what == WSS || what == JDBC_encrypted || what == ODBC_encrypted ||
        what == NoSQL_access_protocol_encrypted || what == SQL_access_protocol_encrypted || what == BINARY_encrypted || what == TEXT_encrypted || what == SSH || what == SSH_tunnel ||
        what == FTPS || what == SFTP || what == SCP || what == LDAPS || what == Reverse_proxy_web_protocol_encrypted ||
        what == IIOP_encrypted || what == JRMP_encrypted || what == SMB_encrypted || what == SMTP_encrypted || what == POP3_encrypted || what == IMAP_encrypted
}

// IsPotentialDatabaseAccessProtocol reports whether the protocol may be used
// for database access. With includingLaxDatabaseProtocols it also accepts
// HTTP(S) (REST-based NoSQL stores) and unknown binary protocols.
func (what Protocol) IsPotentialDatabaseAccessProtocol(includingLaxDatabaseProtocols bool) bool {
    strictlyDatabaseOnlyProtocol := what == JDBC_encrypted || what == ODBC_encrypted ||
        what == NoSQL_access_protocol_encrypted || what == SQL_access_protocol_encrypted || what == JDBC || what == ODBC || what == NoSQL_access_protocol || what == SQL_access_protocol
    if includingLaxDatabaseProtocols {
        // include HTTP for REST-based NoSQL-DBs as well as unknown binary
        return strictlyDatabaseOnlyProtocol || what == HTTPS || what == HTTP || what == BINARY || what == BINARY_encrypted
    }
    return strictlyDatabaseOnlyProtocol
}

// IsPotentialWebAccessProtocol reports whether the protocol is a web-facing
// protocol (HTTP(S), WebSocket(S), or reverse-proxy variants).
func (what Protocol) IsPotentialWebAccessProtocol() bool {
    return what == HTTP || what == HTTPS || what == WS || what == WSS || what == Reverse_proxy_web_protocol || what == Reverse_proxy_web_protocol_encrypted
}

// TechnicalAssetTechnology enumerates the concrete technology kind of an
// asset; the int value doubles as the index into
// TechnicalAssetTechnologyTypeDescription.
type TechnicalAssetTechnology int

const (
    UnknownTechnology TechnicalAssetTechnology = iota
    ClientSystem
    Browser
    Desktop
    MobileApp
    DevOpsClient
    WebServer
    WebApplication
    ApplicationServer
    Database
    FileServer
    LocalFileSystem
    ERP
    CMS
    WebServiceREST
    WebServiceSOAP
    EJB
    SearchIndex
    SearchEngine
    ServiceRegistry
    ReverseProxy
    LoadBalancer
    BuildPipeline
    SourcecodeRepository
    ArtifactRegistry
    CodeInspectionPlatform
    Monitoring
    LDAPServer
    ContainerPlatform
    BatchProcessing
    EventListener
    IdentityProvider
    IdentityStoreLDAP
    IdentityStoreDatabase
    Tool
    CLI
    Task
    Function
    Gateway // TODO rename to API-Gateway to be more clear?
    IoTDevice
    MessageQueue
    StreamProcessing
    ServiceMesh
    DataLake
    BigDataPlatform
    ReportEngine
    AI
    MailServer
    Vault
    HSM
    WAF
    IDS
    IPS
    Scheduler
    Mainframe
    BlockStorage
    Library
)

// TechnicalAssetTechnologyValues lists all TechnicalAssetTechnology values in
// declaration order.
func TechnicalAssetTechnologyValues() []TypeEnum {
    return []TypeEnum{
        UnknownTechnology,
        ClientSystem,
        Browser,
        Desktop,
        MobileApp,
        DevOpsClient,
        WebServer,
        WebApplication,
        ApplicationServer,
        Database,
        FileServer,
        LocalFileSystem,
        ERP,
        CMS,
        WebServiceREST,
        WebServiceSOAP,
        EJB,
        SearchIndex,
        SearchEngine,
        ServiceRegistry,
        ReverseProxy,
        LoadBalancer,
        BuildPipeline,
        SourcecodeRepository,
        ArtifactRegistry,
        CodeInspectionPlatform,
        Monitoring,
        LDAPServer,
        ContainerPlatform,
        BatchProcessing,
        EventListener,
        IdentityProvider,
        IdentityStoreLDAP,
        IdentityStoreDatabase,
        Tool,
        CLI,
        Task,
        Function,
        Gateway,
        IoTDevice,
        MessageQueue,
        StreamProcessing,
        ServiceMesh,
        DataLake,
        BigDataPlatform,
        ReportEngine,
        AI,
        MailServer,
        Vault,
        HSM,
        WAF,
        IDS,
        IPS,
        Scheduler,
        Mainframe,
        BlockStorage,
        Library,
    }
}

// TechnicalAssetTechnologyTypeDescription is indexed by the
// TechnicalAssetTechnology value; order must match the const block above.
var TechnicalAssetTechnologyTypeDescription = [...]TypeDescription{
    {"unknown-technology", "Unknown technology"},
    {"client-system", "A client system"},
    {"browser", "A web browser"},
    {"desktop", "A desktop system (or laptop)"},
    {"mobile-app", "A mobile app (smartphone, tablet)"},
    {"devops-client", "A client used for DevOps"},
    {"web-server", "A web server"},
    {"web-application", "A web application"},
    {"application-server", "An application server (Apache Tomcat, ...)"},
    {"database", "A database"},
    {"file-server", "A file server"},
    {"local-file-system", "The local file system"},
    {"erp", "Enterprise-Resource-Planning"},
    {"cms", "Content Management System"},
    {"web-service-rest", "A REST web service (API)"},
    {"web-service-soap", "A SOAP web service (API)"},
    {"ejb", "Jakarta Enterprise Beans fka Enterprise JavaBeans"},
    {"search-index", "The index database of a search engine"},
    {"search-engine", "A search engine"},
    {"service-registry", "A central place where data schemas can be found and distributed"},
    {"reverse-proxy", "A proxy hiding internal infrastructure from caller making requests. Can also reduce load"},
    {"load-balancer", "A load balancer directing incoming requests to available internal infrastructure"},
    {"build-pipeline", "A software build pipeline"},
    {"sourcecode-repository", "Git or similar"},
    {"artifact-registry", "A registry to store build artifacts"},
    {"code-inspection-platform", "(Static) Code Analysis)"},
    {"monitoring", "A monitoring system (SIEM, logs)"},
    {"ldap-server", "A LDAP server"},
    {"container-platform", "A platform for hosting and executing containers"},
    {"batch-processing", "A set of tools automatically processing data"},
    {"event-listener", "An event listener waiting to be triggered and spring to action"},
    {"identity-provider", "A authentication provider"},
    {"identity-store-ldap", "Authentication data as LDAP"},
    {"identity-store-database", "Authentication data as database"},
    {"tool", "A specific tool"},
    {"cli", "A command line tool"},
    {"task", "A specific task"},
    {"function", "A specific function (maybe RPC ?)"},
    {"gateway", "A gateway connecting two systems or trust boundaries"},
    {"iot-device", "An IoT device"},
    {"message-queue", "A message queue (like MQTT)"},
    {"stream-processing", "Data stream processing"},
    {"service-mesh", "Infrastructure for service-to-service communication"},
    {"data-lake", "A huge database"},
    {"big-data-platform", "Storage for big data"},
    {"report-engine", "Software for report generation"},
    {"ai", "An Artificial Intelligence service"},
    {"mail-server", "A Mail server"},
    {"vault", "Encryption and key management"},
    {"hsm", "Hardware Security Module"},
    {"waf", "Web Application Firewall"},
    {"ids", "Intrusion Detection System"},
    {"ips", "Intrusion Prevention System"},
    {"scheduler", "Scheduled tasks"},
    {"mainframe", "A central, big computer"},
    {"block-storage", "SAN or similar central file storage"},
    {"library", "A software library"},
}

func (what TechnicalAssetTechnology) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return TechnicalAssetTechnologyTypeDescription[what].Name
}

func (what TechnicalAssetTechnology) Explain() string {
    return TechnicalAssetTechnologyTypeDescription[what].Description
}

// The Is* classifier methods below group technologies for use by risk rules.

func (what TechnicalAssetTechnology) IsWebApplication() bool {
    return what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP || what == CMS || what == IdentityProvider || what == ReportEngine
}

func (what TechnicalAssetTechnology) IsWebService() bool {
    return what == WebServiceREST || what == WebServiceSOAP
}

func (what TechnicalAssetTechnology) IsIdentityRelated() bool {
    return what == IdentityProvider || what == IdentityStoreLDAP || what == IdentityStoreDatabase
}

func (what TechnicalAssetTechnology) IsSecurityControlRelated() bool {
    return what == Vault || what == HSM || what == WAF || what == IDS || what == IPS
}

func (what TechnicalAssetTechnology) IsUnprotectedCommsTolerated() bool {
    return what == Monitoring || what == IDS || what == IPS
}

func (what TechnicalAssetTechnology) IsUnnecessaryDataTolerated() bool {
    return what == Monitoring || what == IDS || what == IPS
}

func (what TechnicalAssetTechnology) IsCloseToHighValueTargetsTolerated() bool {
    return what == Monitoring || what == IDS || what == IPS || what == LoadBalancer || what == ReverseProxy
}

func (what TechnicalAssetTechnology) IsClient() bool {
    return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == IoTDevice
}

func (what TechnicalAssetTechnology) IsUsuallyAbleToPropagateIdentityToOutgoingTargets() bool {
    return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp ||
        what == DevOpsClient || what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP ||
        what == CMS || what == WebServiceREST || what == WebServiceSOAP || what == EJB ||
        what == SearchEngine || what == ReverseProxy || what == LoadBalancer || what == IdentityProvider ||
        what == Tool || what == CLI || what == Task || what == Function || what == Gateway ||
        what == IoTDevice || what == MessageQueue || what == ServiceMesh || what == ReportEngine || what == WAF || what == Library

}

func (what TechnicalAssetTechnology) IsLessProtectedType() bool {
    return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == WebServer || what == WebApplication || what == ApplicationServer || what == CMS ||
        what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == BuildPipeline || what == SourcecodeRepository ||
        what == ArtifactRegistry || what == CodeInspectionPlatform || what == Monitoring || what == IoTDevice || what == AI || what == MailServer || what == Scheduler ||
        what == Mainframe
}

func (what TechnicalAssetTechnology) IsUsuallyProcessingEnduserRequests() bool {
    return what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP || what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == ReportEngine
}
func (what TechnicalAssetTechnology) IsUsuallyStoringEnduserData() bool {
    return what == Database || what == ERP || what == FileServer || what == LocalFileSystem || what == BlockStorage || what == MailServer || what == StreamProcessing || what == MessageQueue
}

func (what TechnicalAssetTechnology) IsExclusivelyFrontendRelated() bool {
    return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == CMS || what == ReverseProxy || what == WAF || what == LoadBalancer || what == Gateway || what == IoTDevice
}

func (what TechnicalAssetTechnology) IsExclusivelyBackendRelated() bool {
    return what == Database || what == IdentityProvider || what == IdentityStoreLDAP || what == IdentityStoreDatabase || what == ERP || what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == SearchIndex ||
        what == SearchEngine || what == ContainerPlatform || what == BatchProcessing || what == EventListener || what == DataLake || what == BigDataPlatform || what == MessageQueue ||
        what == StreamProcessing || what == ServiceMesh || what == Vault || what == HSM || what == Scheduler || what == Mainframe || what == FileServer || what == BlockStorage
}

func (what TechnicalAssetTechnology) IsDevelopmentRelevant() bool {
    return what == BuildPipeline || what == SourcecodeRepository || what == ArtifactRegistry || what == CodeInspectionPlatform || what == DevOpsClient
}

func (what TechnicalAssetTechnology) IsTrafficForwarding() bool {
    return what == LoadBalancer || what == ReverseProxy || what == WAF
}

func (what TechnicalAssetTechnology) IsEmbeddedComponent() bool {
    return what == Library
}

// TechnicalAssetMachine describes the machine type an asset runs on.
type TechnicalAssetMachine int

const (
    Physical TechnicalAssetMachine = iota
    Virtual
    Container
    Serverless
)

// TechnicalAssetMachineValues lists all TechnicalAssetMachine values.
func TechnicalAssetMachineValues() []TypeEnum {
    return []TypeEnum{
        Physical,
        Virtual,
        Container,
        Serverless,
    }
}

var TechnicalAssetMachineTypeDescription = [...]TypeDescription{
    {"physical", "A physical machine"},
    {"virtual", "A virtual machine"},
    {"container", "A container"},
    {"serverless", "A serverless application"},
}

func (what TechnicalAssetMachine) String() string {
    return TechnicalAssetMachineTypeDescription[what].Name
}

func (what TechnicalAssetMachine) Explain() string {
    return TechnicalAssetMachineTypeDescription[what].Description
}

// TrustBoundaryType classifies what kind of boundary a trust boundary is
// (network-level vs. a purely logical execution environment).
type TrustBoundaryType int

const (
    NetworkOnPrem TrustBoundaryType = iota
    NetworkDedicatedHoster
    NetworkVirtualLAN
    NetworkCloudProvider
    NetworkCloudSecurityGroup
    NetworkPolicyNamespaceIsolation
    ExecutionEnvironment
)

// TrustBoundaryTypeValues lists all TrustBoundaryType values.
func TrustBoundaryTypeValues() []TypeEnum {
    return []TypeEnum{
        NetworkOnPrem,
        NetworkDedicatedHoster,
        NetworkVirtualLAN,
        NetworkCloudProvider,
        NetworkCloudSecurityGroup,
        NetworkPolicyNamespaceIsolation,
        ExecutionEnvironment,
    }
}

var TrustBoundaryTypeDescription = [...]TypeDescription{
    {"network-on-prem", "The whole network is on prem"},
    {"network-dedicated-hoster", "The network is at a dedicated hoster"},
    {"network-virtual-lan", "Network is a VLAN"},
    {"network-cloud-provider", "Network is at a cloud provider"},
    {"network-cloud-security-group", "Cloud rules controlling network traffic"},
    {"network-policy-namespace-isolation", "Segregation in a Kubernetes cluster"},
    {"execution-environment", "Logical group of items (not a protective network boundary in that sense). More like a namespace or another logical group of items"},
}

func (what TrustBoundaryType) String() string {
    // NOTE: maintain list also in schema.json for validation in IDEs
    return TrustBoundaryTypeDescription[what].Name
}

func (what TrustBoundaryType) Explain() string {
    return TrustBoundaryTypeDescription[what].Description
}

// IsNetworkBoundary reports whether the boundary is an actual network
// segregation (everything except ExecutionEnvironment).
func (what TrustBoundaryType) IsNetworkBoundary() bool {
    return what == NetworkOnPrem || what == NetworkDedicatedHoster || what == NetworkVirtualLAN ||
        what == NetworkCloudProvider || what == NetworkCloudSecurityGroup || what == NetworkPolicyNamespaceIsolation
}

// IsWithinCloud reports whether the boundary lives at a cloud provider.
func (what TrustBoundaryType) IsWithinCloud() bool {
    return what == NetworkCloudProvider || what == NetworkCloudSecurityGroup
}

// RecursivelyAllTechnicalAssetIDsInside returns the IDs of all technical
// assets inside this trust boundary and, transitively, inside all boundaries
// nested within it. (The TrustBoundary struct itself is defined elsewhere.)
func (what TrustBoundary) RecursivelyAllTechnicalAssetIDsInside() []string {
    result := make([]string, 0)
    what.addAssetIDsRecursively(&result)
    return result
}

// addAssetIDsRecursively appends this boundary's direct asset IDs and then
// recurses into nested boundaries, looked up via the global ParsedModelRoot.
func (what TrustBoundary) addAssetIDsRecursively(result *[]string) {
    *result = append(*result, what.TechnicalAssetsInside...)
    for _, nestedBoundaryID := range what.TrustBoundariesNested {
        ParsedModelRoot.TrustBoundaries[nestedBoundaryID].addAssetIDsRecursively(result)
    }
}

// AllParentTrustBoundaryIDs returns this boundary's ID followed by the IDs of
// all its ancestors (the boundary itself is included — see
// addTrustBoundaryIDsRecursively).
func (what TrustBoundary) AllParentTrustBoundaryIDs() []string {
    result := make([]string, 0)
    what.addTrustBoundaryIDsRecursively(&result)
    return result
}

// addTrustBoundaryIDsRecursively appends this boundary's own ID and walks up
// the parent chain via ParsedModelRoot.
func (what TrustBoundary) addTrustBoundaryIDsRecursively(result *[]string) {
    *result = append(*result, what.Id)
    parentID := what.ParentTrustBoundaryID()
    if len(parentID) > 0 {
        ParsedModelRoot.TrustBoundaries[parentID].addTrustBoundaryIDsRecursively(result)
    }
}

// IsSharingSameParentTrustBoundary reports whether the two assets share a
// trust boundary context: both outside any boundary, both in the same
// boundary, or their boundaries share any common ancestor (each boundary's
// ancestor list includes itself).
func IsSharingSameParentTrustBoundary(left, right TechnicalAsset) bool {
    tbIDLeft, tbIDRight := left.GetTrustBoundaryId(), right.GetTrustBoundaryId()
    if len(tbIDLeft) == 0 && len(tbIDRight) > 0 {
        return false
    }
    if len(tbIDLeft) > 0 && len(tbIDRight) == 0 {
        return false
    }
    if len(tbIDLeft) == 0 && len(tbIDRight) == 0 {
        return true
    }
    if tbIDLeft == tbIDRight {
        return true
    }
    tbLeft, tbRight := ParsedModelRoot.TrustBoundaries[tbIDLeft], ParsedModelRoot.TrustBoundaries[tbIDRight]
    tbParentsLeft, tbParentsRight := tbLeft.AllParentTrustBoundaryIDs(), tbRight.AllParentTrustBoundaryIDs()
    for _, parentLeft := range tbParentsLeft {
        for _, parentRight := range tbParentsRight {
            if parentLeft == parentRight {
                return true
            }
        }
    }
    return false
}

// DataAsset is the parsed in-memory form of a data asset from the model.
type DataAsset struct {
    Id                      string `json:"id"`          // TODO: tag here still required?
    Title                   string `json:"title"`       // TODO: tag here still required?
    Description             string `json:"description"` // TODO: tag here still required?
    Usage                   Usage
    Tags                    []string
    Origin, Owner           string
    Quantity                Quantity
    Confidentiality         Confidentiality
    Integrity, Availability Criticality
    JustificationCiaRating  string
}

// IsTaggedWithAny reports whether the asset carries any of the given tags
// (case-insensitive).
func (what DataAsset) IsTaggedWithAny(tags ...string) bool {
    return ContainsCaseInsensitiveAny(what.Tags, tags...)
}

// IsTaggedWithBaseTag reports whether any tag matches the given basetag
// (see the free function IsTaggedWithBaseTag below for the matching rule).
func (what DataAsset) IsTaggedWithBaseTag(basetag string) bool {
    return IsTaggedWithBaseTag(what.Tags, basetag)
}

/*
func (what DataAsset) IsAtRisk() bool {
	for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
		if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
			return true
		}
	}
	for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
		if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
			return true
		}
	}
	return false
}
*/
/*
func (what DataAsset) IdentifiedRiskSeverityStillAtRisk() RiskSeverity {
	highestRiskSeverity := Low
	for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
		candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
		if candidateSeverity > highestRiskSeverity {
			highestRiskSeverity = candidateSeverity
		}
	}
	for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
		candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
		if candidateSeverity > highestRiskSeverity {
			highestRiskSeverity = candidateSeverity
		}
	}
	return highestRiskSeverity
}
*/
// IdentifiedRisksByResponsibleTechnicalAssetId collects, per technical asset
// that processes or stores this data asset and has generated risks, all risks
// generated by that asset, keyed by the asset's ID.
func (what DataAsset) IdentifiedRisksByResponsibleTechnicalAssetId() map[string][]Risk {
    // first build the set of responsible asset IDs (map used as a set)
    uniqueTechAssetIDsResponsibleForThisDataAsset := make(map[string]interface{})
    for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
        if len(techAsset.GeneratedRisks()) > 0 {
            uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = true
        }
    }
    for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
        if len(techAsset.GeneratedRisks()) > 0 {
            uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = true
        }
    }

    result := make(map[string][]Risk)
    for techAssetId, _ := range uniqueTechAssetIDsResponsibleForThisDataAsset {
        result[techAssetId] = append(result[techAssetId], ParsedModelRoot.TechnicalAssets[techAssetId].GeneratedRisks()...)
    }
    return result
}

// IsDataBreachPotentialStillAtRisk reports whether any still-open risk lists a
// technical asset (as data-breach target) that processes or stores this data
// asset.
func (what DataAsset) IsDataBreachPotentialStillAtRisk() bool {
    for _, risk := range FilteredByStillAtRisk() {
        for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
                return true
            }
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsStored, what.Id) {
                return true
            }
        }
    }
    return false
}

// IdentifiedDataBreachProbability returns the highest data-breach probability
// over ALL risks touching this data asset.
func (what DataAsset) IdentifiedDataBreachProbability() DataBreachProbability {
    highestProbability := Improbable
    for _, risk := range AllRisks() {
        for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
                if risk.DataBreachProbability > highestProbability {
                    highestProbability = risk.DataBreachProbability
                    // this risk's probability is counted; skip its remaining assets
                    break
                }
            }
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsStored, what.Id) {
                if risk.DataBreachProbability > highestProbability {
                    highestProbability = risk.DataBreachProbability
                    break
                }
            }
        }
    }
    return highestProbability
}

// IdentifiedDataBreachProbabilityStillAtRisk is like
// IdentifiedDataBreachProbability but restricted to still-open risks.
func (what DataAsset) IdentifiedDataBreachProbabilityStillAtRisk() DataBreachProbability {
    highestProbability := Improbable
    for _, risk := range FilteredByStillAtRisk() {
        for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
                if risk.DataBreachProbability > highestProbability {
                    highestProbability = risk.DataBreachProbability
                    break
                }
            }
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsStored, what.Id) {
                if risk.DataBreachProbability > highestProbability {
                    highestProbability = risk.DataBreachProbability
                    break
                }
            }
        }
    }
    return highestProbability
}

// IdentifiedDataBreachProbabilityRisksStillAtRisk returns every still-open
// risk whose data-breach targets process or store this data asset (each risk
// appears at most once thanks to the break).
func (what DataAsset) IdentifiedDataBreachProbabilityRisksStillAtRisk() []Risk {
    result := make([]Risk, 0)
    for _, risk := range FilteredByStillAtRisk() {
        for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
                result = append(result, risk)
                break
            }
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsStored, what.Id) {
                result = append(result, risk)
                break
            }
        }
    }
    return result
}

// IdentifiedDataBreachProbabilityRisks is the AllRisks() variant of the above.
func (what DataAsset) IdentifiedDataBreachProbabilityRisks() []Risk {
    result := make([]Risk, 0)
    for _, risk := range AllRisks() {
        for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
                result = append(result, risk)
                break
            }
            if Contains(ParsedModelRoot.TechnicalAssets[techAsset].DataAssetsStored, what.Id) {
                result = append(result, risk)
                break
            }
        }
    }
    return result
}

// ProcessedByTechnicalAssetsSorted returns all technical assets that process
// this data asset, sorted by title.
func (what DataAsset) ProcessedByTechnicalAssetsSorted() []TechnicalAsset {
    result := make([]TechnicalAsset, 0)
    for _, technicalAsset := range ParsedModelRoot.TechnicalAssets {
        for _, candidateID := range technicalAsset.DataAssetsProcessed {
            if candidateID == what.Id {
                result = append(result, technicalAsset)
            }
        }
    }
    sort.Sort(ByTechnicalAssetTitleSort(result))
    return result
}

// StoredByTechnicalAssetsSorted returns all technical assets that store this
// data asset, sorted by title.
func (what DataAsset) StoredByTechnicalAssetsSorted() []TechnicalAsset {
    result := make([]TechnicalAsset, 0)
    for _, technicalAsset := range ParsedModelRoot.TechnicalAssets {
        for _, candidateID := range technicalAsset.DataAssetsStored {
            if candidateID == what.Id {
                result = append(result, technicalAsset)
            }
        }
    }
    sort.Sort(ByTechnicalAssetTitleSort(result))
    return result
}

// SentViaCommLinksSorted returns all communication links over which this data
// asset is sent, sorted by link title.
func (what DataAsset) SentViaCommLinksSorted() []CommunicationLink {
    result := make([]CommunicationLink, 0)
    for _, technicalAsset := range ParsedModelRoot.TechnicalAssets {
        for _, commLink := range technicalAsset.CommunicationLinks {
            for _, candidateID := range commLink.DataAssetsSent {
                if candidateID == what.Id {
                    result = append(result, commLink)
                }
            }
        }
    }
    sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
    return result
}

// ReceivedViaCommLinksSorted returns all communication links over which this
// data asset is received, sorted by link title.
func (what DataAsset) ReceivedViaCommLinksSorted() []CommunicationLink {
    result := make([]CommunicationLink, 0)
    for _, technicalAsset := range ParsedModelRoot.TechnicalAssets {
        for _, commLink := range technicalAsset.CommunicationLinks {
            for _, candidateID := range commLink.DataAssetsReceived {
                if candidateID == what.Id {
                    result = append(result, commLink)
                }
            }
        }
    }
    sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
    return result
}

func IsTaggedWithBaseTag(tags []string, basetag string) bool { // basetags are before the colon ":" like in "aws:ec2" it's "aws". The subtag is after the colon. Also a pure "aws" tag matches the basetag "aws"
    basetag = strings.ToLower(strings.TrimSpace(basetag))
    for _, tag := range tags {
        tag = strings.ToLower(strings.TrimSpace(tag))
        if tag == basetag || strings.HasPrefix(tag, basetag+":") {
            return true
        }
    }
    return false
}

// TechnicalAsset is the parsed in-memory form of a technical asset from the
// model, including its communication links and the RAA value computed later.
type TechnicalAsset struct {
    Id, Title, Description                                                                  string
    Usage                                                                                   Usage
    Type                                                                                    TechnicalAssetType
    Size                                                                                    TechnicalAssetSize
    Technology                                                                              TechnicalAssetTechnology
    Machine                                                                                 TechnicalAssetMachine
    Internet, MultiTenant, Redundant, CustomDevelopedParts, OutOfScope, UsedAsClientByHuman bool
    Encryption                                                                              EncryptionStyle
    JustificationOutOfScope                                                                 string
    Owner                                                                                   string
    Confidentiality                                                                         Confidentiality
    Integrity, Availability                                                                 Criticality
    JustificationCiaRating                                                                  string
    Tags, DataAssetsProcessed, DataAssetsStored                                             []string
    DataFormatsAccepted                                                                     []DataFormat
    CommunicationLinks                                                                      []CommunicationLink
    DiagramTweakOrder                                                                       int
    // will be set by separate calculation step:
    RAA float64
}

// IsTaggedWithAny reports whether the asset carries any of the given tags
// (case-insensitive).
// NOTE(review): this function's closing brace lies beyond this chunk.
func (what TechnicalAsset) IsTaggedWithAny(tags ...string) bool {
    return ContainsCaseInsensitiveAny(what.Tags, tags...)
-} - -func (what TechnicalAsset) IsTaggedWithBaseTag(basetag string) bool { - return IsTaggedWithBaseTag(what.Tags, basetag) -} - -// first use the tag(s) of the asset itself, then their trust boundaries (recursively up) and then their shared runtime -func (what TechnicalAsset) IsTaggedWithAnyTraversingUp(tags ...string) bool { - if ContainsCaseInsensitiveAny(what.Tags, tags...) { - return true - } - tbID := what.GetTrustBoundaryId() - if len(tbID) > 0 { - if ParsedModelRoot.TrustBoundaries[tbID].IsTaggedWithAnyTraversingUp(tags...) { - return true - } - } - for _, sr := range ParsedModelRoot.SharedRuntimes { - if Contains(sr.TechnicalAssetsRunning, what.Id) && sr.IsTaggedWithAny(tags...) { - return true - } - } - return false -} - -func (what TechnicalAsset) IsSameTrustBoundary(otherAssetId string) bool { - trustBoundaryOfMyAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id] - trustBoundaryOfOtherAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId] - return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id -} - -func (what TechnicalAsset) IsSameExecutionEnvironment(otherAssetId string) bool { - trustBoundaryOfMyAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id] - trustBoundaryOfOtherAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId] - if trustBoundaryOfMyAsset.Type == ExecutionEnvironment && trustBoundaryOfOtherAsset.Type == ExecutionEnvironment { - return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id - } - return false -} - -func (what TechnicalAsset) IsSameTrustBoundaryNetworkOnly(otherAssetId string) bool { - trustBoundaryOfMyAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id] - if !trustBoundaryOfMyAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then - trustBoundaryOfMyAsset = ParsedModelRoot.TrustBoundaries[trustBoundaryOfMyAsset.ParentTrustBoundaryID()] - } - trustBoundaryOfOtherAsset := 
DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId] - if !trustBoundaryOfOtherAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then - trustBoundaryOfOtherAsset = ParsedModelRoot.TrustBoundaries[trustBoundaryOfOtherAsset.ParentTrustBoundaryID()] - } - return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id -} - -func (what TechnicalAsset) HighestSensitivityScore() float64 { - return what.Confidentiality.AttackerAttractivenessForAsset() + - what.Integrity.AttackerAttractivenessForAsset() + - what.Availability.AttackerAttractivenessForAsset() -} - -func (what TechnicalAsset) HighestConfidentiality() Confidentiality { - highest := what.Confidentiality - for _, dataId := range what.DataAssetsProcessed { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Confidentiality > highest { - highest = dataAsset.Confidentiality - } - } - for _, dataId := range what.DataAssetsStored { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Confidentiality > highest { - highest = dataAsset.Confidentiality - } - } - return highest -} - -func (what TechnicalAsset) DataAssetsProcessedSorted() []DataAsset { - result := make([]DataAsset, 0) - for _, assetID := range what.DataAssetsProcessed { - result = append(result, ParsedModelRoot.DataAssets[assetID]) - } - sort.Sort(ByDataAssetTitleSort(result)) - return result -} - -func (what TechnicalAsset) DataAssetsStoredSorted() []DataAsset { - result := make([]DataAsset, 0) - for _, assetID := range what.DataAssetsStored { - result = append(result, ParsedModelRoot.DataAssets[assetID]) - } - sort.Sort(ByDataAssetTitleSort(result)) - return result -} - -func (what TechnicalAsset) DataFormatsAcceptedSorted() []DataFormat { - result := make([]DataFormat, 0) - for _, format := range what.DataFormatsAccepted { - result = append(result, format) - } - sort.Sort(ByDataFormatAcceptedSort(result)) - return result -} - -func (what TechnicalAsset) CommunicationLinksSorted() 
[]CommunicationLink { - result := make([]CommunicationLink, 0) - for _, format := range what.CommunicationLinks { - result = append(result, format) - } - sort.Sort(ByTechnicalCommunicationLinkTitleSort(result)) - return result -} - -func (what TechnicalAsset) HighestIntegrity() Criticality { - highest := what.Integrity - for _, dataId := range what.DataAssetsProcessed { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Integrity > highest { - highest = dataAsset.Integrity - } - } - for _, dataId := range what.DataAssetsStored { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Integrity > highest { - highest = dataAsset.Integrity - } - } - return highest -} - -func (what TechnicalAsset) HighestAvailability() Criticality { - highest := what.Availability - for _, dataId := range what.DataAssetsProcessed { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Availability > highest { - highest = dataAsset.Availability - } - } - for _, dataId := range what.DataAssetsStored { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Availability > highest { - highest = dataAsset.Availability - } - } - return highest -} - -func (what TechnicalAsset) HasDirectConnection(otherAssetId string) bool { - for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] { - if dataFlow.SourceId == otherAssetId { - return true - } - } - // check both directions, hence two times, just reversed - for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[otherAssetId] { - if dataFlow.SourceId == what.Id { - return true - } - } - return false -} - -func (what TechnicalAsset) GeneratedRisks() []Risk { - resultingRisks := make([]Risk, 0) - if len(SortedRiskCategories()) == 0 { - fmt.Println("Uh, strange, no risks generated (yet?) 
and asking for them by tech asset...") - } - for _, category := range SortedRiskCategories() { - risks := SortedRisksOfCategory(category) - for _, risk := range risks { - if risk.MostRelevantTechnicalAssetId == what.Id { - resultingRisks = append(resultingRisks, risk) - } - } - } - sort.Sort(ByRiskSeveritySort(resultingRisks)) - return resultingRisks -} - -/* -func (what TechnicalAsset) HighestRiskSeverity() RiskSeverity { - highest := Low - for _, risk := range what.GeneratedRisks() { - if risk.Severity > highest { - highest = risk.Severity - } - } - return highest -} -*/ - -type ByDataAssetDataBreachProbabilityAndTitleSort []DataAsset - -func (what ByDataAssetDataBreachProbabilityAndTitleSort) Len() int { return len(what) } -func (what ByDataAssetDataBreachProbabilityAndTitleSort) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByDataAssetDataBreachProbabilityAndTitleSort) Less(i, j int) bool { - highestDataBreachProbabilityLeft := what[i].IdentifiedDataBreachProbability() - highestDataBreachProbabilityRight := what[j].IdentifiedDataBreachProbability() - if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight { - return what[i].Title < what[j].Title - } - return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight -} - -type ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk []DataAsset - -func (what ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk) Len() int { return len(what) } -func (what ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk) Less(i, j int) bool { - risksLeft := what[i].IdentifiedDataBreachProbabilityRisksStillAtRisk() - risksRight := what[j].IdentifiedDataBreachProbabilityRisksStillAtRisk() - highestDataBreachProbabilityLeft := what[i].IdentifiedDataBreachProbabilityStillAtRisk() - highestDataBreachProbabilityRight := 
what[j].IdentifiedDataBreachProbabilityStillAtRisk() - if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight { - if len(risksLeft) == 0 && len(risksRight) > 0 { - return false - } - if len(risksLeft) > 0 && len(risksRight) == 0 { - return true - } - return what[i].Title < what[j].Title - } - return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight -} - -type ByOrderAndIdSort []TechnicalAsset - -func (what ByOrderAndIdSort) Len() int { return len(what) } -func (what ByOrderAndIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByOrderAndIdSort) Less(i, j int) bool { - if what[i].DiagramTweakOrder == what[j].DiagramTweakOrder { - return what[i].Id > what[j].Id - } - return what[i].DiagramTweakOrder < what[j].DiagramTweakOrder -} - -type ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk []TechnicalAsset - -func (what ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk) Len() int { return len(what) } -func (what ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk) Less(i, j int) bool { - risksLeft := ReduceToOnlyStillAtRisk(what[i].GeneratedRisks()) - risksRight := ReduceToOnlyStillAtRisk(what[j].GeneratedRisks()) - highestSeverityLeft := HighestSeverityStillAtRisk(risksLeft) - highestSeverityRight := HighestSeverityStillAtRisk(risksRight) - var result bool - if highestSeverityLeft == highestSeverityRight { - if len(risksLeft) == 0 && len(risksRight) > 0 { - return false - } else if len(risksLeft) > 0 && len(risksRight) == 0 { - return true - } else { - result = what[i].Title < what[j].Title - } - } else { - result = highestSeverityLeft > highestSeverityRight - } - if what[i].OutOfScope && what[j].OutOfScope { - result = what[i].Title < what[j].Title - } else if what[i].OutOfScope { - result = false - } else if what[j].OutOfScope { - result = true - } - return result -} - -type 
ByTechnicalAssetRAAAndTitleSort []TechnicalAsset - -func (what ByTechnicalAssetRAAAndTitleSort) Len() int { return len(what) } -func (what ByTechnicalAssetRAAAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTechnicalAssetRAAAndTitleSort) Less(i, j int) bool { - raaLeft := what[i].RAA - raaRight := what[j].RAA - if raaLeft == raaRight { - return what[i].Title < what[j].Title - } - return raaLeft > raaRight -} - -/* -type ByTechnicalAssetQuickWinsAndTitleSort []TechnicalAsset - -func (what ByTechnicalAssetQuickWinsAndTitleSort) Len() int { return len(what) } -func (what ByTechnicalAssetQuickWinsAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTechnicalAssetQuickWinsAndTitleSort) Less(i, j int) bool { - qwLeft := what[i].QuickWins() - qwRight := what[j].QuickWins() - if qwLeft == qwRight { - return what[i].Title < what[j].Title - } - return qwLeft > qwRight -} -*/ - -type ByTechnicalAssetTitleSort []TechnicalAsset - -func (what ByTechnicalAssetTitleSort) Len() int { return len(what) } -func (what ByTechnicalAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTechnicalAssetTitleSort) Less(i, j int) bool { - return what[i].Title < what[j].Title -} - -type ByTrustBoundaryTitleSort []TrustBoundary - -func (what ByTrustBoundaryTitleSort) Len() int { return len(what) } -func (what ByTrustBoundaryTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTrustBoundaryTitleSort) Less(i, j int) bool { - return what[i].Title < what[j].Title -} - -type BySharedRuntimeTitleSort []SharedRuntime - -func (what BySharedRuntimeTitleSort) Len() int { return len(what) } -func (what BySharedRuntimeTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what BySharedRuntimeTitleSort) Less(i, j int) bool { - return what[i].Title < what[j].Title -} - -type ByDataAssetTitleSort []DataAsset - -func (what ByDataAssetTitleSort) Len() int { return len(what) } 
-func (what ByDataAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByDataAssetTitleSort) Less(i, j int) bool { - return what[i].Title < what[j].Title -} - -type ByDataFormatAcceptedSort []DataFormat - -func (what ByDataFormatAcceptedSort) Len() int { return len(what) } -func (what ByDataFormatAcceptedSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByDataFormatAcceptedSort) Less(i, j int) bool { - return what[i].String() < what[j].String() -} - -type CommunicationLink struct { - Id, SourceId, TargetId, Title, Description string - Protocol Protocol - Tags []string - VPN, IpFiltered, Readonly bool - Authentication Authentication - Authorization Authorization - Usage Usage - DataAssetsSent, DataAssetsReceived []string - DiagramTweakWeight int - DiagramTweakConstraint bool -} - -func (what CommunicationLink) IsTaggedWithAny(tags ...string) bool { - return ContainsCaseInsensitiveAny(what.Tags, tags...) -} - -func (what CommunicationLink) IsTaggedWithBaseTag(basetag string) bool { - return IsTaggedWithBaseTag(what.Tags, basetag) -} - -type ByTechnicalCommunicationLinkIdSort []CommunicationLink - -func (what ByTechnicalCommunicationLinkIdSort) Len() int { return len(what) } -func (what ByTechnicalCommunicationLinkIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTechnicalCommunicationLinkIdSort) Less(i, j int) bool { - return what[i].Id > what[j].Id -} - -type ByTechnicalCommunicationLinkTitleSort []CommunicationLink - -func (what ByTechnicalCommunicationLinkTitleSort) Len() int { return len(what) } -func (what ByTechnicalCommunicationLinkTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } -func (what ByTechnicalCommunicationLinkTitleSort) Less(i, j int) bool { - return what[i].Title > what[j].Title -} - -type TrustBoundary struct { - Id, Title, Description string - Type TrustBoundaryType - Tags []string - TechnicalAssetsInside []string - TrustBoundariesNested []string -} - 
-func (what TrustBoundary) IsTaggedWithAny(tags ...string) bool { - return ContainsCaseInsensitiveAny(what.Tags, tags...) -} - -func (what TrustBoundary) IsTaggedWithBaseTag(basetag string) bool { - return IsTaggedWithBaseTag(what.Tags, basetag) -} - -func (what TrustBoundary) IsTaggedWithAnyTraversingUp(tags ...string) bool { - if what.IsTaggedWithAny(tags...) { - return true - } - parentID := what.ParentTrustBoundaryID() - if len(parentID) > 0 && ParsedModelRoot.TrustBoundaries[parentID].IsTaggedWithAnyTraversingUp(tags...) { - return true - } - return false -} - -func (what TrustBoundary) ParentTrustBoundaryID() string { - var result string - for _, candidate := range ParsedModelRoot.TrustBoundaries { - if Contains(candidate.TrustBoundariesNested, what.Id) { - result = candidate.Id - return result - } - } - return result -} - -func (what TrustBoundary) HighestConfidentiality() Confidentiality { - highest := Public - for _, id := range what.RecursivelyAllTechnicalAssetIDsInside() { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestConfidentiality() > highest { - highest = techAsset.HighestConfidentiality() - } - } - return highest -} - -func (what TrustBoundary) HighestIntegrity() Criticality { - highest := Archive - for _, id := range what.RecursivelyAllTechnicalAssetIDsInside() { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestIntegrity() > highest { - highest = techAsset.HighestIntegrity() - } - } - return highest -} - -func (what TrustBoundary) HighestAvailability() Criticality { - highest := Archive - for _, id := range what.RecursivelyAllTechnicalAssetIDsInside() { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestAvailability() > highest { - highest = techAsset.HighestAvailability() - } - } - return highest -} - -type SharedRuntime struct { - Id, Title, Description string - Tags []string - TechnicalAssetsRunning []string -} - -func (what SharedRuntime) IsTaggedWithAny(tags ...string) 
bool { - return ContainsCaseInsensitiveAny(what.Tags, tags...) -} - -func (what SharedRuntime) IsTaggedWithBaseTag(basetag string) bool { - return IsTaggedWithBaseTag(what.Tags, basetag) -} - -func (what SharedRuntime) HighestConfidentiality() Confidentiality { - highest := Public - for _, id := range what.TechnicalAssetsRunning { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestConfidentiality() > highest { - highest = techAsset.HighestConfidentiality() - } - } - return highest -} - -func (what SharedRuntime) HighestIntegrity() Criticality { - highest := Archive - for _, id := range what.TechnicalAssetsRunning { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestIntegrity() > highest { - highest = techAsset.HighestIntegrity() - } - } - return highest -} - -func (what SharedRuntime) HighestAvailability() Criticality { - highest := Archive - for _, id := range what.TechnicalAssetsRunning { - techAsset := ParsedModelRoot.TechnicalAssets[id] - if techAsset.HighestAvailability() > highest { - highest = techAsset.HighestAvailability() - } - } - return highest -} - -func (what SharedRuntime) TechnicalAssetWithHighestRAA() TechnicalAsset { - result := ParsedModelRoot.TechnicalAssets[what.TechnicalAssetsRunning[0]] - for _, asset := range what.TechnicalAssetsRunning { - candidate := ParsedModelRoot.TechnicalAssets[asset] - if candidate.RAA > result.RAA { - result = candidate - } - } - return result -} - -func (what CommunicationLink) IsAcrossTrustBoundary() bool { - trustBoundaryOfSourceAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId] - trustBoundaryOfTargetAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId] - return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id -} - -func (what CommunicationLink) IsAcrossTrustBoundaryNetworkOnly() bool { - trustBoundaryOfSourceAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId] - if 
!trustBoundaryOfSourceAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then - trustBoundaryOfSourceAsset = ParsedModelRoot.TrustBoundaries[trustBoundaryOfSourceAsset.ParentTrustBoundaryID()] - } - trustBoundaryOfTargetAsset := DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId] - if !trustBoundaryOfTargetAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then - trustBoundaryOfTargetAsset = ParsedModelRoot.TrustBoundaries[trustBoundaryOfTargetAsset.ParentTrustBoundaryID()] - } - return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id && trustBoundaryOfTargetAsset.Type.IsNetworkBoundary() -} - -func (what CommunicationLink) HighestConfidentiality() Confidentiality { - highest := Public - for _, dataId := range what.DataAssetsSent { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Confidentiality > highest { - highest = dataAsset.Confidentiality - } - } - for _, dataId := range what.DataAssetsReceived { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Confidentiality > highest { - highest = dataAsset.Confidentiality - } - } - return highest -} - -func (what CommunicationLink) HighestIntegrity() Criticality { - highest := Archive - for _, dataId := range what.DataAssetsSent { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Integrity > highest { - highest = dataAsset.Integrity - } - } - for _, dataId := range what.DataAssetsReceived { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Integrity > highest { - highest = dataAsset.Integrity - } - } - return highest -} - -func (what CommunicationLink) HighestAvailability() Criticality { - highest := Archive - for _, dataId := range what.DataAssetsSent { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if dataAsset.Availability > highest { - highest = dataAsset.Availability - } - } - for _, dataId := range what.DataAssetsReceived { - dataAsset := ParsedModelRoot.DataAssets[dataId] - if 
dataAsset.Availability > highest { - highest = dataAsset.Availability - } - } - return highest -} - -func (what CommunicationLink) DataAssetsSentSorted() []DataAsset { - result := make([]DataAsset, 0) - for _, assetID := range what.DataAssetsSent { - result = append(result, ParsedModelRoot.DataAssets[assetID]) - } - sort.Sort(ByDataAssetTitleSort(result)) - return result -} - -func (what CommunicationLink) DataAssetsReceivedSorted() []DataAsset { - result := make([]DataAsset, 0) - for _, assetID := range what.DataAssetsReceived { - result = append(result, ParsedModelRoot.DataAssets[assetID]) - } - sort.Sort(ByDataAssetTitleSort(result)) - return result -} - -type Author struct { - Name string `json:"name"` - Homepage string `json:"homepage"` -} - -type Overview struct { - Description string `json:"description"` - Images []map[string]string `json:"images"` // yes, array of map here, as array keeps the order of the image keys -} - -type ParsedModel struct { - Author Author - Title string - Date time.Time - ManagementSummaryComment string - BusinessOverview Overview - TechnicalOverview Overview - BusinessCriticality Criticality - SecurityRequirements map[string]string - Questions map[string]string - AbuseCases map[string]string - TagsAvailable []string - DataAssets map[string]DataAsset - TechnicalAssets map[string]TechnicalAsset - TrustBoundaries map[string]TrustBoundary - SharedRuntimes map[string]SharedRuntime - IndividualRiskCategories map[string]RiskCategory - RiskTracking map[string]RiskTracking - DiagramTweakNodesep, DiagramTweakRanksep int - DiagramTweakEdgeLayout string - DiagramTweakSuppressEdgeLabels bool - DiagramTweakLayoutLeftToRight bool - DiagramTweakInvisibleConnectionsBetweenAssets []string - DiagramTweakSameRankAssets []string -} - -func SortedTechnicalAssetIDs() []string { - res := make([]string, 0) - for id, _ := range ParsedModelRoot.TechnicalAssets { - res = append(res, id) - } - sort.Strings(res) - return res -} - -func TagsActuallyUsed() 
[]string { - result := make([]string, 0) - for _, tag := range ParsedModelRoot.TagsAvailable { - if len(TechnicalAssetsTaggedWithAny(tag)) > 0 || - len(CommunicationLinksTaggedWithAny(tag)) > 0 || - len(DataAssetsTaggedWithAny(tag)) > 0 || - len(TrustBoundariesTaggedWithAny(tag)) > 0 || - len(SharedRuntimesTaggedWithAny(tag)) > 0 { - result = append(result, tag) - } - } - return result -} - -// === Sorting stuff ===================================== - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfIndividualRiskCategories() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.IndividualRiskCategories { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfSecurityRequirements() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.SecurityRequirements { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfAbuseCases() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.AbuseCases { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfQuestions() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.Questions { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfDataAssets() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.DataAssets { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// as in Go ranging over map is random order, range over them 
in sorted (hence reproducible) way: -func SortedKeysOfTechnicalAssets() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.TechnicalAssets { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -func TechnicalAssetsTaggedWithAny(tags ...string) []TechnicalAsset { - result := make([]TechnicalAsset, 0) - for _, candidate := range ParsedModelRoot.TechnicalAssets { - if candidate.IsTaggedWithAny(tags...) { - result = append(result, candidate) - } - } - return result -} - -func CommunicationLinksTaggedWithAny(tags ...string) []CommunicationLink { - result := make([]CommunicationLink, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - for _, candidate := range asset.CommunicationLinks { - if candidate.IsTaggedWithAny(tags...) { - result = append(result, candidate) - } - } - } - return result -} - -func DataAssetsTaggedWithAny(tags ...string) []DataAsset { - result := make([]DataAsset, 0) - for _, candidate := range ParsedModelRoot.DataAssets { - if candidate.IsTaggedWithAny(tags...) { - result = append(result, candidate) - } - } - return result -} - -func TrustBoundariesTaggedWithAny(tags ...string) []TrustBoundary { - result := make([]TrustBoundary, 0) - for _, candidate := range ParsedModelRoot.TrustBoundaries { - if candidate.IsTaggedWithAny(tags...) { - result = append(result, candidate) - } - } - return result -} - -func SharedRuntimesTaggedWithAny(tags ...string) []SharedRuntime { - result := make([]SharedRuntime, 0) - for _, candidate := range ParsedModelRoot.SharedRuntimes { - if candidate.IsTaggedWithAny(tags...) 
{ - result = append(result, candidate) - } - } - return result -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedTechnicalAssetsByTitle() []TechnicalAsset { - assets := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - assets = append(assets, asset) - } - sort.Sort(ByTechnicalAssetTitleSort(assets)) - return assets -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedDataAssetsByTitle() []DataAsset { - assets := make([]DataAsset, 0) - for _, asset := range ParsedModelRoot.DataAssets { - assets = append(assets, asset) - } - sort.Sort(ByDataAssetTitleSort(assets)) - return assets -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedDataAssetsByDataBreachProbabilityAndTitleStillAtRisk() []DataAsset { - assets := make([]DataAsset, 0) - for _, asset := range ParsedModelRoot.DataAssets { - assets = append(assets, asset) - } - sort.Sort(ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk(assets)) - return assets -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedDataAssetsByDataBreachProbabilityAndTitle() []DataAsset { - assets := make([]DataAsset, 0) - for _, asset := range ParsedModelRoot.DataAssets { - assets = append(assets, asset) - } - sort.Sort(ByDataAssetDataBreachProbabilityAndTitleSortStillAtRisk(assets)) - return assets -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedTechnicalAssetsByRiskSeverityAndTitle() []TechnicalAsset { - assets := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - assets = append(assets, asset) - } - sort.Sort(ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk(assets)) - return assets -} - -// as in Go ranging over map is random order, range 
over them in sorted (hence reproducible) way: -func SortedTechnicalAssetsByRAAAndTitle() []TechnicalAsset { - assets := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - assets = append(assets, asset) - } - sort.Sort(ByTechnicalAssetRAAAndTitleSort(assets)) - return assets -} - -/* -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedTechnicalAssetsByQuickWinsAndTitle() []TechnicalAsset { - assets := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - if !asset.OutOfScope && asset.QuickWins() > 0 { - assets = append(assets, asset) - } - } - sort.Sort(ByTechnicalAssetQuickWinsAndTitleSort(assets)) - return assets -} -*/ - -func OutOfScopeTechnicalAssets() []TechnicalAsset { - assets := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - if asset.OutOfScope { - assets = append(assets, asset) - } - } - sort.Sort(ByTechnicalAssetTitleSort(assets)) - return assets -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfTrustBoundaries() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.TrustBoundaries { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -func SortedTrustBoundariesByTitle() []TrustBoundary { - boundaries := make([]TrustBoundary, 0) - for _, boundary := range ParsedModelRoot.TrustBoundaries { - boundaries = append(boundaries, boundary) - } - sort.Sort(ByTrustBoundaryTitleSort(boundaries)) - return boundaries -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedKeysOfSharedRuntime() []string { - keys := make([]string, 0) - for k, _ := range ParsedModelRoot.SharedRuntimes { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -func SortedSharedRuntimesByTitle() []SharedRuntime { - result := make([]SharedRuntime, 
0) - for _, runtime := range ParsedModelRoot.SharedRuntimes { - result = append(result, runtime) - } - sort.Sort(BySharedRuntimeTitleSort(result)) - return result -} - -func QuestionsUnanswered() int { - result := 0 - for _, answer := range ParsedModelRoot.Questions { - if len(strings.TrimSpace(answer)) == 0 { - result++ - } - } - return result -} - -// === Style stuff ======================================= - -// Line Styles: - -// dotted when model forgery attempt (i.e. nothing being sent and received) -func (what CommunicationLink) DetermineArrowLineStyle() string { - if len(what.DataAssetsSent) == 0 && len(what.DataAssetsReceived) == 0 { - return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... - } - if what.Usage == DevOps { - return "dashed" - } - return "solid" -} - -// dotted when model forgery attempt (i.e. nothing being processed or stored) -func (what TechnicalAsset) DetermineShapeBorderLineStyle() string { - if len(what.DataAssetsProcessed) == 0 && len(what.DataAssetsStored) == 0 || what.OutOfScope { - return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... 
- } - return "solid" -} - -// 3 when redundant -func (what TechnicalAsset) DetermineShapePeripheries() int { - if what.Redundant { - return 2 - } - return 1 -} - -func (what TechnicalAsset) DetermineShapeStyle() string { - return "filled" -} - -func (what TechnicalAsset) GetTrustBoundaryId() string { - for _, trustBoundary := range ParsedModelRoot.TrustBoundaries { - for _, techAssetInside := range trustBoundary.TechnicalAssetsInside { - if techAssetInside == what.Id { - return trustBoundary.Id - } - } - } - return "" -} - -// Pen Widths: - -func (what CommunicationLink) DetermineArrowPenWidth() string { - if what.DetermineArrowColor() == colors.Pink { - return fmt.Sprintf("%f", 3.0) - } - if what.DetermineArrowColor() != colors.Black { - return fmt.Sprintf("%f", 2.5) - } - return fmt.Sprintf("%f", 1.5) -} - -func (what TechnicalAsset) DetermineShapeBorderPenWidth() string { - if what.DetermineShapeBorderColor() == colors.Pink { - return fmt.Sprintf("%f", 3.5) - } - if what.DetermineShapeBorderColor() != colors.Black { - return fmt.Sprintf("%f", 3.0) - } - return fmt.Sprintf("%f", 2.0) -} - -/* -// Loops over all data assets (stored and processed by this technical asset) and determines for each -// data asset, how many percentage of the data risk is reduced when this technical asset has all risks mitigated. -// Example: This means if the data asset is loosing a risk and thus getting from red to amber it counts as 1. -// Other example: When only one out of four lines (see data risk mapping) leading to red tech assets are removed by -// the mitigations, then this counts as 0.25. The overall sum is returned. 
-func (what TechnicalAsset) QuickWins() float64 { - result := 0.0 - uniqueDataAssetsStoredAndProcessed := make(map[string]interface{}) - for _, dataAssetId := range what.DataAssetsStored { - uniqueDataAssetsStoredAndProcessed[dataAssetId] = true - } - for _, dataAssetId := range what.DataAssetsProcessed { - uniqueDataAssetsStoredAndProcessed[dataAssetId] = true - } - highestSeverity := HighestSeverityStillAtRisk(what.GeneratedRisks()) - for dataAssetId, _ := range uniqueDataAssetsStoredAndProcessed { - dataAsset := ParsedModelRoot.DataAssets[dataAssetId] - if dataAsset.IdentifiedRiskSeverityStillAtRisk() <= highestSeverity { - howManySameLevelCausingUsagesOfThisData := 0.0 - for techAssetId, risks := range dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId() { - if !ParsedModelRoot.TechnicalAssets[techAssetId].OutOfScope { - for _, risk := range risks { - if len(risk.MostRelevantTechnicalAssetId) > 0 { // T O D O caching of generated risks inside the method? - if HighestSeverityStillAtRisk(ParsedModelRoot.TechnicalAssets[risk.MostRelevantTechnicalAssetId].GeneratedRisks()) == highestSeverity { - howManySameLevelCausingUsagesOfThisData++ - break - } - } - } - } - } - if howManySameLevelCausingUsagesOfThisData > 0 { - result += 1.0 / howManySameLevelCausingUsagesOfThisData - } - } - } - return result -} -*/ - -func (what CommunicationLink) IsBidirectional() bool { - return len(what.DataAssetsSent) > 0 && len(what.DataAssetsReceived) > 0 -} - -// Contains tells whether a contains x (in an unsorted slice) -func Contains(a []string, x string) bool { - for _, n := range a { - if x == n { - return true - } - } - return false -} - -func ContainsCaseInsensitiveAny(a []string, x ...string) bool { - for _, n := range a { - for _, c := range x { - if strings.TrimSpace(strings.ToLower(c)) == strings.TrimSpace(strings.ToLower(n)) { - return true - } - } - } - return false -} - -func (what TechnicalAsset) IsZero() bool { - return len(what.Id) == 0 -} - -func (what 
TechnicalAsset) ProcessesOrStoresDataAsset(dataAssetId string) bool { - if Contains(what.DataAssetsProcessed, dataAssetId) { - return true - } - if Contains(what.DataAssetsStored, dataAssetId) { - return true - } - return false -} - -// red when >= confidential data stored in unencrypted technical asset -func (what TechnicalAsset) DetermineLabelColor() string { - // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here - // Check for red - if what.Integrity == MissionCritical { - return colors.Red - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - for _, processedDataAsset := range what.DataAssetsProcessed { - if ParsedModelRoot.DataAssets[processedDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - // Check for amber - if what.Integrity == Critical { - return colors.Amber - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Integrity == Critical { - return colors.Amber - } - } - for _, processedDataAsset := range what.DataAssetsProcessed { - if ParsedModelRoot.DataAssets[processedDataAsset].Integrity == Critical { - return colors.Amber - } - } - return colors.Black - /* - if what.Encrypted { - return colors.Black - } else { - if what.Confidentiality == StrictlyConfidential { - return colors.Red - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == StrictlyConfidential { - return colors.Red - } - } - if what.Confidentiality == Confidential { - return colors.Amber - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == Confidential { - return colors.Amber - } - } - return colors.Black - } - */ -} - -// red when mission-critical integrity, but still unauthenticated 
(non-readonly) channels access it -// amber when critical integrity, but still unauthenticated (non-readonly) channels access it -// pink when model forgery attempt (i.e. nothing being processed or stored) -func (what TechnicalAsset) DetermineShapeBorderColor() string { - // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here - // Check for red - if what.Confidentiality == StrictlyConfidential { - return colors.Red - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == StrictlyConfidential { - return colors.Red - } - } - for _, processedDataAsset := range what.DataAssetsProcessed { - if ParsedModelRoot.DataAssets[processedDataAsset].Confidentiality == StrictlyConfidential { - return colors.Red - } - } - // Check for amber - if what.Confidentiality == Confidential { - return colors.Amber - } - for _, storedDataAsset := range what.DataAssetsStored { - if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == Confidential { - return colors.Amber - } - } - for _, processedDataAsset := range what.DataAssetsProcessed { - if ParsedModelRoot.DataAssets[processedDataAsset].Confidentiality == Confidential { - return colors.Amber - } - } - return colors.Black - /* - if what.Integrity == MissionCritical { - for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] { - if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication { - return colors.Red - } - } - } - - if what.Integrity == Critical { - for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] { - if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication { - return colors.Amber - } - } - } - - if len(what.DataAssetsProcessed) == 0 && len(what.DataAssetsStored) == 0 { - return colors.Pink // pink, because it's strange when too many technical assets process no data... 
some ok, but many in a diagram ist a sign of model forgery... - } - - return colors.Black - */ -} - -func (what CommunicationLink) DetermineLabelColor() string { - // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here - /* - if dataFlow.Protocol.IsEncrypted() { - return colors.Gray - } else {*/ - // check for red - for _, sentDataAsset := range what.DataAssetsSent { - if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - for _, receivedDataAsset := range what.DataAssetsReceived { - if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - // check for amber - for _, sentDataAsset := range what.DataAssetsSent { - if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == Critical { - return colors.Amber - } - } - for _, receivedDataAsset := range what.DataAssetsReceived { - if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == Critical { - return colors.Amber - } - } - // default - return colors.Gray - -} - -// pink when model forgery attempt (i.e. nothing being sent and received) -func (what CommunicationLink) DetermineArrowColor() string { - // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here - if len(what.DataAssetsSent) == 0 && len(what.DataAssetsReceived) == 0 || - what.Protocol == UnknownProtocol { - return colors.Pink // pink, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... 
- } - if what.Usage == DevOps { - return colors.MiddleLightGray - } else if what.VPN { - return colors.DarkBlue - } else if what.IpFiltered { - return colors.Brown - } - // check for red - for _, sentDataAsset := range what.DataAssetsSent { - if ParsedModelRoot.DataAssets[sentDataAsset].Confidentiality == StrictlyConfidential { - return colors.Red - } - } - for _, receivedDataAsset := range what.DataAssetsReceived { - if ParsedModelRoot.DataAssets[receivedDataAsset].Confidentiality == StrictlyConfidential { - return colors.Red - } - } - // check for amber - for _, sentDataAsset := range what.DataAssetsSent { - if ParsedModelRoot.DataAssets[sentDataAsset].Confidentiality == Confidential { - return colors.Amber - } - } - for _, receivedDataAsset := range what.DataAssetsReceived { - if ParsedModelRoot.DataAssets[receivedDataAsset].Confidentiality == Confidential { - return colors.Amber - } - } - // default - return colors.Black - /* - } else if dataFlow.Authentication != NoneAuthentication { - return colors.Black - } else { - // check for red - for _, sentDataAsset := range dataFlow.DataAssetsSent { // first check if any red? - if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // first check if any red? - if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == MissionCritical { - return colors.Red - } - } - // check for amber - for _, sentDataAsset := range dataFlow.DataAssetsSent { // then check if any amber? - if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == Critical { - return colors.Amber - } - } - for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // then check if any amber? 
- if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == Critical { - return colors.Amber - } - } - return colors.Black - } - */ -} - -func (what TechnicalAsset) DetermineShapeFillColor() string { - fillColor := colors.VeryLightGray - if len(what.DataAssetsProcessed) == 0 && len(what.DataAssetsStored) == 0 || - what.Technology == UnknownTechnology { - fillColor = colors.LightPink // lightPink, because it's strange when too many technical assets process no data... some ok, but many in a diagram ist a sign of model forgery... - } else if len(what.CommunicationLinks) == 0 && len(IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id]) == 0 { - fillColor = colors.LightPink - } else if what.Internet { - fillColor = colors.ExtremeLightBlue - } else if what.OutOfScope { - fillColor = colors.OutOfScopeFancy - } else if what.CustomDevelopedParts { - fillColor = colors.CustomDevelopedParts - } - switch what.Machine { - case Physical: - fillColor = colors.DarkenHexColor(fillColor) - case Container: - fillColor = colors.BrightenHexColor(fillColor) - case Serverless: - fillColor = colors.BrightenHexColor(colors.BrightenHexColor(fillColor)) - } - return fillColor -} - -// === Risk stuff ======================================== - -type DataBreachProbability int - -const ( - Improbable DataBreachProbability = iota - Possible - Probable -) - -func DataBreachProbabilityValues() []TypeEnum { - return []TypeEnum{ - Improbable, - Possible, - Probable, - } -} - -var DataBreachProbabilityTypeDescription = [...]TypeDescription{ - {"improbable", "Improbable"}, - {"possible", "Possible"}, - {"probable", "Probable"}, -} - -func (what DataBreachProbability) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return DataBreachProbabilityTypeDescription[what].Name -} - -func (what DataBreachProbability) Explain() string { - return DataBreachProbabilityTypeDescription[what].Description -} - -func (what DataBreachProbability) Title() string { - 
return [...]string{"Improbable", "Possible", "Probable"}[what] -} - -func (what DataBreachProbability) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -func CalculateSeverity(likelihood RiskExploitationLikelihood, impact RiskExploitationImpact) RiskSeverity { - result := likelihood.Weight() * impact.Weight() - if result <= 1 { - return LowSeverity - } - if result <= 3 { - return MediumSeverity - } - if result <= 8 { - return ElevatedSeverity - } - if result <= 12 { - return HighSeverity - } - return CriticalSeverity -} - -type RiskSeverity int - -const ( - LowSeverity RiskSeverity = iota - MediumSeverity - ElevatedSeverity - HighSeverity - CriticalSeverity -) - -func RiskSeverityValues() []TypeEnum { - return []TypeEnum{ - LowSeverity, - MediumSeverity, - ElevatedSeverity, - HighSeverity, - CriticalSeverity, - } -} - -var RiskSeverityTypeDescription = [...]TypeDescription{ - {"low", "Low"}, - {"medium", "Medium"}, - {"elevated", "Elevated"}, - {"high", "High"}, - {"critical", "Critical"}, -} - -func (what RiskSeverity) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return RiskSeverityTypeDescription[what].Name -} - -func (what RiskSeverity) Explain() string { - return RiskSeverityTypeDescription[what].Description -} - -func (what RiskSeverity) Title() string { - return [...]string{"Low", "Medium", "Elevated", "High", "Critical"}[what] -} - -func (what RiskSeverity) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -type RiskExploitationLikelihood int - -const ( - Unlikely RiskExploitationLikelihood = iota - Likely - VeryLikely - Frequent -) - -func RiskExploitationLikelihoodValues() []TypeEnum { - return []TypeEnum{ - Unlikely, - Likely, - VeryLikely, - Frequent, - } -} - -var RiskExploitationLikelihoodTypeDescription = [...]TypeDescription{ - {"unlikely", "Unlikely"}, - {"likely", "Likely"}, - {"very-likely", "Very-Likely"}, - {"frequent", "Frequent"}, -} - -func (what 
RiskExploitationLikelihood) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return RiskExploitationLikelihoodTypeDescription[what].Name -} - -func (what RiskExploitationLikelihood) Explain() string { - return RiskExploitationLikelihoodTypeDescription[what].Description -} - -func (what RiskExploitationLikelihood) Title() string { - return [...]string{"Unlikely", "Likely", "Very Likely", "Frequent"}[what] -} - -func (what RiskExploitationLikelihood) Weight() int { - return [...]int{1, 2, 3, 4}[what] -} - -func (what RiskExploitationLikelihood) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -type RiskExploitationImpact int - -const ( - LowImpact RiskExploitationImpact = iota - MediumImpact - HighImpact - VeryHighImpact -) - -func RiskExploitationImpactValues() []TypeEnum { - return []TypeEnum{ - LowImpact, - MediumImpact, - HighImpact, - VeryHighImpact, - } -} - -var RiskExploitationImpactTypeDescription = [...]TypeDescription{ - {"low", "Low"}, - {"medium", "Medium"}, - {"high", "High"}, - {"very-high", "Very High"}, -} - -func (what RiskExploitationImpact) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return RiskExploitationImpactTypeDescription[what].Name -} - -func (what RiskExploitationImpact) Explain() string { - return RiskExploitationImpactTypeDescription[what].Description -} - -func (what RiskExploitationImpact) Title() string { - return [...]string{"Low", "Medium", "High", "Very High"}[what] -} - -func (what RiskExploitationImpact) Weight() int { - return [...]int{1, 2, 3, 4}[what] -} - -func (what RiskExploitationImpact) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -type RiskFunction int - -const ( - BusinessSide RiskFunction = iota - Architecture - Development - Operations -) - -func RiskFunctionValues() []TypeEnum { - return []TypeEnum{ - BusinessSide, - Architecture, - Development, - Operations, - } -} - -var 
RiskFunctionTypeDescription = [...]TypeDescription{ - {"business-side", "Business"}, - {"architecture", "Architecture"}, - {"development", "Development"}, - {"operations", "Operations"}, -} - -func (what RiskFunction) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return RiskFunctionTypeDescription[what].Name -} - -func (what RiskFunction) Explain() string { - return RiskFunctionTypeDescription[what].Description -} - -func (what RiskFunction) Title() string { - return [...]string{"Business Side", "Architecture", "Development", "Operations"}[what] -} - -func (what RiskFunction) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -type STRIDE int - -const ( - Spoofing STRIDE = iota - Tampering - Repudiation - InformationDisclosure - DenialOfService - ElevationOfPrivilege -) - -func STRIDEValues() []TypeEnum { - return []TypeEnum{ - Spoofing, - Tampering, - Repudiation, - InformationDisclosure, - DenialOfService, - ElevationOfPrivilege, - } -} - -var StrideTypeDescription = [...]TypeDescription{ - {"spoofing", "Spoofing - Authenticity"}, - {"tampering", "Tampering - Integrity"}, - {"repudiation", "Repudiation - Non-repudiability"}, - {"information-disclosure", "Information disclosure - Confidentiality"}, - {"denial-of-service", "Denial of service - Availability"}, - {"elevation-of-privilege", "Elevation of privilege - Authorization"}, -} - -func (what STRIDE) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return StrideTypeDescription[what].Name -} - -func (what STRIDE) Explain() string { - return StrideTypeDescription[what].Description -} - -func (what STRIDE) Title() string { - return [...]string{"Spoofing", "Tampering", "Repudiation", "Information Disclosure", "Denial of Service", "Elevation of Privilege"}[what] -} - -func (what STRIDE) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -type MacroDetails struct { - ID, Title, Description 
string -} - -type MacroQuestion struct { - ID, Title, Description string - PossibleAnswers []string - MultiSelect bool - DefaultAnswer string -} - -const NoMoreQuestionsID = "" - -func NoMoreQuestions() MacroQuestion { - return MacroQuestion{ - ID: NoMoreQuestionsID, - Title: "", - Description: "", - PossibleAnswers: nil, - MultiSelect: false, - DefaultAnswer: "", - } -} - -func (what MacroQuestion) NoMoreQuestions() bool { - return what.ID == NoMoreQuestionsID -} - -func (what MacroQuestion) IsValueConstrained() bool { - return what.PossibleAnswers != nil && len(what.PossibleAnswers) > 0 -} - -func (what MacroQuestion) IsMatchingValueConstraint(answer string) bool { - if what.IsValueConstrained() { - for _, val := range what.PossibleAnswers { - if strings.ToLower(val) == strings.ToLower(answer) { - return true - } - } - return false - } - return true -} - -type RiskCategory struct { - // TODO: refactor all "Id" here and elsewhere to "ID" - Id string - Title string - Description string - Impact string - ASVS string - CheatSheet string - Action string - Mitigation string - Check string - DetectionLogic string - RiskAssessment string - FalsePositives string - Function RiskFunction - STRIDE STRIDE - ModelFailurePossibleReason bool - CWE int -} - -type ByRiskCategoryTitleSort []RiskCategory - -func (what ByRiskCategoryTitleSort) Len() int { return len(what) } -func (what ByRiskCategoryTitleSort) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByRiskCategoryTitleSort) Less(i, j int) bool { - return what[i].Title < what[j].Title -} - -type ByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk []RiskCategory - -func (what ByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk) Len() int { return len(what) } -func (what ByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk) Less(i, j int) bool { - risksLeft := 
ReduceToOnlyStillAtRisk(GeneratedRisksByCategory[what[i]]) - risksRight := ReduceToOnlyStillAtRisk(GeneratedRisksByCategory[what[j]]) - highestLeft := HighestSeverityStillAtRisk(risksLeft) - highestRight := HighestSeverityStillAtRisk(risksRight) - if highestLeft == highestRight { - if len(risksLeft) == 0 && len(risksRight) > 0 { - return false - } - if len(risksLeft) > 0 && len(risksRight) == 0 { - return true - } - return what[i].Title < what[j].Title - } - return highestLeft > highestRight -} - -type RiskStatistics struct { - // TODO add also some more like before / after (i.e. with mitigation applied) - Risks map[string]map[string]int `json:"risks"` -} - -type Risk struct { - Category RiskCategory `json:"-"` // just for navigational convenience... not JSON marshalled - CategoryId string `json:"category"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically - RiskStatus RiskStatus `json:"risk_status"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically - Severity RiskSeverity `json:"severity"` - ExploitationLikelihood RiskExploitationLikelihood `json:"exploitation_likelihood"` - ExploitationImpact RiskExploitationImpact `json:"exploitation_impact"` - Title string `json:"title"` - SyntheticId string `json:"synthetic_id"` - MostRelevantDataAssetId string `json:"most_relevant_data_asset"` - MostRelevantTechnicalAssetId string `json:"most_relevant_technical_asset"` - MostRelevantTrustBoundaryId string `json:"most_relevant_trust_boundary"` - MostRelevantSharedRuntimeId string `json:"most_relevant_shared_runtime"` - MostRelevantCommunicationLinkId string `json:"most_relevant_communication_link"` - DataBreachProbability DataBreachProbability `json:"data_breach_probability"` - DataBreachTechnicalAssetIDs []string `json:"data_breach_technical_assets"` - // TODO: refactor all "Id" here to "ID"? -} - -func (what Risk) GetRiskTracking() RiskTracking { // TODO: Unify function naming reagrding Get etc. 
- var result RiskTracking - if riskTracking, ok := ParsedModelRoot.RiskTracking[what.SyntheticId]; ok { - result = riskTracking - } - return result -} - -func (what Risk) GetRiskTrackingStatusDefaultingUnchecked() RiskStatus { - if riskTracking, ok := ParsedModelRoot.RiskTracking[what.SyntheticId]; ok { - return riskTracking.Status - } - return Unchecked -} - -func (what Risk) IsRiskTracked() bool { - if _, ok := ParsedModelRoot.RiskTracking[what.SyntheticId]; ok { - return true - } - return false -} - -type ByRiskSeveritySort []Risk - -func (what ByRiskSeveritySort) Len() int { return len(what) } -func (what ByRiskSeveritySort) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByRiskSeveritySort) Less(i, j int) bool { - if what[i].Severity == what[j].Severity { - trackingStatusLeft := what[i].GetRiskTrackingStatusDefaultingUnchecked() - trackingStatusRight := what[j].GetRiskTrackingStatusDefaultingUnchecked() - if trackingStatusLeft == trackingStatusRight { - impactLeft := what[i].ExploitationImpact - impactRight := what[j].ExploitationImpact - if impactLeft == impactRight { - likelihoodLeft := what[i].ExploitationLikelihood - likelihoodRight := what[j].ExploitationLikelihood - if likelihoodLeft == likelihoodRight { - return what[i].Title < what[j].Title - } else { - return likelihoodLeft > likelihoodRight - } - } else { - return impactLeft > impactRight - } - } else { - return trackingStatusLeft < trackingStatusRight - } - } - return what[i].Severity > what[j].Severity -} - -type ByDataBreachProbabilitySort []Risk - -func (what ByDataBreachProbabilitySort) Len() int { return len(what) } -func (what ByDataBreachProbabilitySort) Swap(i, j int) { - what[i], what[j] = what[j], what[i] -} -func (what ByDataBreachProbabilitySort) Less(i, j int) bool { - if what[i].DataBreachProbability == what[j].DataBreachProbability { - trackingStatusLeft := what[i].GetRiskTrackingStatusDefaultingUnchecked() - trackingStatusRight := 
what[j].GetRiskTrackingStatusDefaultingUnchecked() - if trackingStatusLeft == trackingStatusRight { - return what[i].Title < what[j].Title - } else { - return trackingStatusLeft < trackingStatusRight - } - } - return what[i].DataBreachProbability > what[j].DataBreachProbability -} - -type RiskTracking struct { - SyntheticRiskId, Justification, Ticket, CheckedBy string - Status RiskStatus - Date time.Time -} - -type RiskStatus int - -const ( - Unchecked RiskStatus = iota - InDiscussion - Accepted - InProgress - Mitigated - FalsePositive -) - -func RiskStatusValues() []TypeEnum { - return []TypeEnum{ - Unchecked, - InDiscussion, - Accepted, - InProgress, - Mitigated, - FalsePositive, - } -} - -var RiskStatusTypeDescription = [...]TypeDescription{ - {"unchecked", "Risk has not yet been reviewed"}, - {"in-discussion", "Risk is currently being discussed (during review)"}, - {"accepted", "Risk has been accepted (as possibly a corporate risk acceptance process defines)"}, - {"in-progress", "Risk mitigation is currently in progress"}, - {"mitigated", "Risk has been mitigated"}, - {"false-positive", "Risk is a false positive (i.e. 
no risk at all or not applicable)"}, -} - -func (what RiskStatus) String() string { - // NOTE: maintain list also in schema.json for validation in IDEs - return RiskStatusTypeDescription[what].Name -} - -func (what RiskStatus) Explain() string { - return RiskStatusTypeDescription[what].Description -} - -func (what RiskStatus) Title() string { - return [...]string{"Unchecked", "in Discussion", "Accepted", "in Progress", "Mitigated", "False Positive"}[what] -} - -func (what RiskStatus) MarshalJSON() ([]byte, error) { - return json.Marshal(what.String()) -} - -func (what RiskStatus) IsStillAtRisk() bool { - return what == Unchecked || what == InDiscussion || what == Accepted || what == InProgress -} - -type RiskRule interface { - Category() RiskCategory - GenerateRisks(parsedModel ParsedModel) []Risk -} - -// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: -func SortedRiskCategories() []RiskCategory { - categories := make([]RiskCategory, 0) - for k, _ := range GeneratedRisksByCategory { - categories = append(categories, k) - } - sort.Sort(ByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk(categories)) - return categories -} -func SortedRisksOfCategory(category RiskCategory) []Risk { - risks := GeneratedRisksByCategory[category] - sort.Sort(ByRiskSeveritySort(risks)) - return risks -} - -func CountRisks(risksByCategory map[RiskCategory][]Risk) int { - result := 0 - for _, risks := range risksByCategory { - result += len(risks) - } - return result -} - -func RisksOfOnlySTRIDESpoofing(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == Spoofing { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlySTRIDETampering(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := 
make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == Tampering { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlySTRIDERepudiation(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == Repudiation { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlySTRIDEInformationDisclosure(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == InformationDisclosure { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlySTRIDEDenialOfService(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == DenialOfService { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlySTRIDEElevationOfPrivilege(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.STRIDE == ElevationOfPrivilege { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlyBusinessSide(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.Function == BusinessSide { - result[risk.Category] = 
append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlyArchitecture(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.Function == Architecture { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlyDevelopment(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.Function == Development { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func RisksOfOnlyOperation(risksByCategory map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk) - for _, risks := range risksByCategory { - for _, risk := range risks { - if risk.Category.Function == Operations { - result[risk.Category] = append(result[risk.Category], risk) - } - } - } - return result -} - -func CategoriesOfOnlyRisksStillAtRisk(risksByCategory map[RiskCategory][]Risk) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - categories[risk.Category] = struct{}{} - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func CategoriesOfOnlyCriticalRisks(risksByCategory map[RiskCategory][]Risk, initialRisks bool) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - if risk.Severity 
== CriticalSeverity { - categories[risk.Category] = struct{}{} - } - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func CategoriesOfOnlyHighRisks(risksByCategory map[RiskCategory][]Risk, initialRisks bool) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - highest := HighestSeverity(GeneratedRisksByCategory[risk.Category]) - if !initialRisks { - highest = HighestSeverityStillAtRisk(GeneratedRisksByCategory[risk.Category]) - } - if risk.Severity == HighSeverity && highest < CriticalSeverity { - categories[risk.Category] = struct{}{} - } - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func CategoriesOfOnlyElevatedRisks(risksByCategory map[RiskCategory][]Risk, initialRisks bool) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - highest := HighestSeverity(GeneratedRisksByCategory[risk.Category]) - if !initialRisks { - highest = HighestSeverityStillAtRisk(GeneratedRisksByCategory[risk.Category]) - } - if risk.Severity == ElevatedSeverity && highest < HighSeverity { - categories[risk.Category] = struct{}{} - } - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func CategoriesOfOnlyMediumRisks(risksByCategory map[RiskCategory][]Risk, initialRisks bool) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !initialRisks && 
!risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - highest := HighestSeverity(GeneratedRisksByCategory[risk.Category]) - if !initialRisks { - highest = HighestSeverityStillAtRisk(GeneratedRisksByCategory[risk.Category]) - } - if risk.Severity == MediumSeverity && highest < ElevatedSeverity { - categories[risk.Category] = struct{}{} - } - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func CategoriesOfOnlyLowRisks(risksByCategory map[RiskCategory][]Risk, initialRisks bool) []RiskCategory { - categories := make(map[RiskCategory]struct{}) // Go's trick of unique elements is a map - for _, risks := range risksByCategory { - for _, risk := range risks { - if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - continue - } - highest := HighestSeverity(GeneratedRisksByCategory[risk.Category]) - if !initialRisks { - highest = HighestSeverityStillAtRisk(GeneratedRisksByCategory[risk.Category]) - } - if risk.Severity == LowSeverity && highest < MediumSeverity { - categories[risk.Category] = struct{}{} - } - } - } - // return as slice (of now unique values) - return keysAsSlice(categories) -} - -func HighestSeverity(risks []Risk) RiskSeverity { - result := LowSeverity - for _, risk := range risks { - if risk.Severity > result { - result = risk.Severity - } - } - return result -} - -func HighestSeverityStillAtRisk(risks []Risk) RiskSeverity { - result := LowSeverity - for _, risk := range risks { - if risk.Severity > result && risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - result = risk.Severity - } - } - return result -} - -func keysAsSlice(categories map[RiskCategory]struct{}) []RiskCategory { - result := make([]RiskCategory, 0, len(categories)) - for k := range categories { - result = append(result, k) - } - return result -} - -func FilteredByOnlyBusinessSide() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range 
GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Category.Function == BusinessSide { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyArchitecture() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Category.Function == Architecture { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyDevelopment() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Category.Function == Development { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyOperation() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Category.Function == Operations { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyCriticalRisks() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Severity == CriticalSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyHighRisks() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Severity == HighSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyElevatedRisks() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Severity == ElevatedSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyMediumRisks() []Risk { - filteredRisks := make([]Risk, 0) 
- for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Severity == MediumSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByOnlyLowRisks() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.Severity == LowSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilterByModelFailures(risksByCat map[RiskCategory][]Risk) map[RiskCategory][]Risk { - result := make(map[RiskCategory][]Risk, 0) - for riskCat, risks := range risksByCat { - if riskCat.ModelFailurePossibleReason { - result[riskCat] = risks - } - } - return result -} - -func FlattenRiskSlice(risksByCat map[RiskCategory][]Risk) []Risk { - result := make([]Risk, 0) - for _, risks := range risksByCat { - result = append(result, risks...) - } - return result -} - -func TotalRiskCount() int { - count := 0 - for _, risks := range GeneratedRisksByCategory { - count += len(risks) - } - return count -} - -func FilteredByRiskTrackingUnchecked() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Unchecked { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByRiskTrackingInDiscussion() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == InDiscussion { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByRiskTrackingAccepted() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Accepted { - filteredRisks = 
append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByRiskTrackingInProgress() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == InProgress { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByRiskTrackingMitigated() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Mitigated { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func FilteredByRiskTrackingFalsePositive() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == FalsePositive { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func ReduceToOnlyHighRisk(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.Severity == HighSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyMediumRisk(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.Severity == MediumSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyLowRisk(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.Severity == LowSeverity { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyRiskTrackingUnchecked(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Unchecked { - filteredRisks = append(filteredRisks, risk) - } - } - 
return filteredRisks -} - -func ReduceToOnlyRiskTrackingInDiscussion(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == InDiscussion { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyRiskTrackingAccepted(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Accepted { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyRiskTrackingInProgress(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == InProgress { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyRiskTrackingMitigated(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == Mitigated { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func ReduceToOnlyRiskTrackingFalsePositive(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked() == FalsePositive { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func FilteredByStillAtRisk() []Risk { - filteredRisks := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - filteredRisks = append(filteredRisks, risk) - } - } - } - return filteredRisks -} - -func OverallRiskStatistics() RiskStatistics { - result := RiskStatistics{} - result.Risks = make(map[string]map[string]int) - result.Risks[CriticalSeverity.String()] = make(map[string]int) - result.Risks[CriticalSeverity.String()][Unchecked.String()] 
= 0 - result.Risks[CriticalSeverity.String()][InDiscussion.String()] = 0 - result.Risks[CriticalSeverity.String()][Accepted.String()] = 0 - result.Risks[CriticalSeverity.String()][InProgress.String()] = 0 - result.Risks[CriticalSeverity.String()][Mitigated.String()] = 0 - result.Risks[CriticalSeverity.String()][FalsePositive.String()] = 0 - result.Risks[HighSeverity.String()] = make(map[string]int) - result.Risks[HighSeverity.String()][Unchecked.String()] = 0 - result.Risks[HighSeverity.String()][InDiscussion.String()] = 0 - result.Risks[HighSeverity.String()][Accepted.String()] = 0 - result.Risks[HighSeverity.String()][InProgress.String()] = 0 - result.Risks[HighSeverity.String()][Mitigated.String()] = 0 - result.Risks[HighSeverity.String()][FalsePositive.String()] = 0 - result.Risks[ElevatedSeverity.String()] = make(map[string]int) - result.Risks[ElevatedSeverity.String()][Unchecked.String()] = 0 - result.Risks[ElevatedSeverity.String()][InDiscussion.String()] = 0 - result.Risks[ElevatedSeverity.String()][Accepted.String()] = 0 - result.Risks[ElevatedSeverity.String()][InProgress.String()] = 0 - result.Risks[ElevatedSeverity.String()][Mitigated.String()] = 0 - result.Risks[ElevatedSeverity.String()][FalsePositive.String()] = 0 - result.Risks[MediumSeverity.String()] = make(map[string]int) - result.Risks[MediumSeverity.String()][Unchecked.String()] = 0 - result.Risks[MediumSeverity.String()][InDiscussion.String()] = 0 - result.Risks[MediumSeverity.String()][Accepted.String()] = 0 - result.Risks[MediumSeverity.String()][InProgress.String()] = 0 - result.Risks[MediumSeverity.String()][Mitigated.String()] = 0 - result.Risks[MediumSeverity.String()][FalsePositive.String()] = 0 - result.Risks[LowSeverity.String()] = make(map[string]int) - result.Risks[LowSeverity.String()][Unchecked.String()] = 0 - result.Risks[LowSeverity.String()][InDiscussion.String()] = 0 - result.Risks[LowSeverity.String()][Accepted.String()] = 0 - 
result.Risks[LowSeverity.String()][InProgress.String()] = 0 - result.Risks[LowSeverity.String()][Mitigated.String()] = 0 - result.Risks[LowSeverity.String()][FalsePositive.String()] = 0 - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - result.Risks[risk.Severity.String()][risk.GetRiskTrackingStatusDefaultingUnchecked().String()]++ - } - } - return result -} - -func AllRisks() []Risk { - result := make([]Risk, 0) - for _, risks := range GeneratedRisksByCategory { - for _, risk := range risks { - result = append(result, risk) - } - } - return result -} - -func ReduceToOnlyStillAtRisk(risks []Risk) []Risk { - filteredRisks := make([]Risk, 0) - for _, risk := range risks { - if risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - filteredRisks = append(filteredRisks, risk) - } - } - return filteredRisks -} - -func HighestExploitationLikelihood(risks []Risk) RiskExploitationLikelihood { - result := Unlikely - for _, risk := range risks { - if risk.ExploitationLikelihood > result { - result = risk.ExploitationLikelihood - } - } - return result -} - -func HighestExploitationImpact(risks []Risk) RiskExploitationImpact { - result := LowImpact - for _, risk := range risks { - if risk.ExploitationImpact > result { - result = risk.ExploitationImpact - } - } - return result -} - -func InScopeTechnicalAssets() []TechnicalAsset { - result := make([]TechnicalAsset, 0) - for _, asset := range ParsedModelRoot.TechnicalAssets { - if !asset.OutOfScope { - result = append(result, asset) - } - } - return result -} diff --git a/pkg/common/attacker-focus.go b/pkg/common/attacker-focus.go new file mode 100644 index 00000000..931d2861 --- /dev/null +++ b/pkg/common/attacker-focus.go @@ -0,0 +1,7 @@ +package common + +type AttackerFocus struct { + Asset int // fibonacci sequence base index + ProcessedOrStoredData int // fibonacci sequence base index + TransferredData int // fibonacci sequence base index +} diff --git 
a/pkg/common/attractiveness.go b/pkg/common/attractiveness.go new file mode 100644 index 00000000..1e31b8c1 --- /dev/null +++ b/pkg/common/attractiveness.go @@ -0,0 +1,8 @@ +package common + +type Attractiveness struct { + Quantity int + Confidentiality AttackerFocus + Integrity AttackerFocus + Availability AttackerFocus +} diff --git a/pkg/common/config.go b/pkg/common/config.go new file mode 100644 index 00000000..8fe33471 --- /dev/null +++ b/pkg/common/config.go @@ -0,0 +1,373 @@ +package common + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" +) + +type Config struct { + BuildTimestamp string + Verbose bool + Interactive bool + + AppFolder string + BinFolder string + DataFolder string + OutputFolder string + ServerFolder string + TempFolder string + KeyFolder string + + InputFile string + DataFlowDiagramFilenamePNG string + DataAssetDiagramFilenamePNG string + DataFlowDiagramFilenameDOT string + DataAssetDiagramFilenameDOT string + ReportFilename string + ExcelRisksFilename string + ExcelTagsFilename string + JsonRisksFilename string + JsonTechnicalAssetsFilename string + JsonStatsFilename string + TemplateFilename string + + RAAPlugin string + RiskRulesPlugins []string + SkipRiskRules string + ExecuteModelMacro string + + ServerMode bool + DiagramDPI int + ServerPort int + GraphvizDPI int + MaxGraphvizDPI int + BackupHistoryFilesToKeep int + + AddModelTitle bool + KeepDiagramSourceFiles bool + IgnoreOrphanedRiskTracking bool + + Attractiveness Attractiveness +} + +func (c *Config) Defaults(buildTimestamp string) *Config { + *c = Config{ + BuildTimestamp: buildTimestamp, + Verbose: false, + + AppFolder: AppDir, + BinFolder: BinDir, + DataFolder: DataDir, + OutputFolder: OutputDir, + ServerFolder: ServerDir, + TempFolder: TempDir, + KeyFolder: KeyDir, + + InputFile: InputFile, + DataFlowDiagramFilenamePNG: DataFlowDiagramFilenamePNG, + DataAssetDiagramFilenamePNG: DataAssetDiagramFilenamePNG, + DataFlowDiagramFilenameDOT: 
DataFlowDiagramFilenameDOT, + DataAssetDiagramFilenameDOT: DataAssetDiagramFilenameDOT, + ReportFilename: ReportFilename, + ExcelRisksFilename: ExcelRisksFilename, + ExcelTagsFilename: ExcelTagsFilename, + JsonRisksFilename: JsonRisksFilename, + JsonTechnicalAssetsFilename: JsonTechnicalAssetsFilename, + JsonStatsFilename: JsonStatsFilename, + TemplateFilename: TemplateFilename, + RAAPlugin: RAAPluginName, + RiskRulesPlugins: make([]string, 0), + SkipRiskRules: "", + ExecuteModelMacro: "", + ServerMode: false, + ServerPort: DefaultServerPort, + + GraphvizDPI: DefaultGraphvizDPI, + BackupHistoryFilesToKeep: DefaultBackupHistoryFilesToKeep, + + AddModelTitle: false, + KeepDiagramSourceFiles: false, + IgnoreOrphanedRiskTracking: false, + + Attractiveness: Attractiveness{ + Quantity: 0, + Confidentiality: AttackerFocus{ + Asset: 0, + ProcessedOrStoredData: 0, + TransferredData: 0, + }, + Integrity: AttackerFocus{ + Asset: 0, + ProcessedOrStoredData: 0, + TransferredData: 0, + }, + Availability: AttackerFocus{ + Asset: 0, + ProcessedOrStoredData: 0, + TransferredData: 0, + }, + }, + } + + return c +} + +func (c *Config) Load(configFilename string) error { + if len(configFilename) == 0 { + return nil + } + + data, readError := os.ReadFile(filepath.Clean(configFilename)) + if readError != nil { + return readError + } + + values := make(map[string]any) + parseError := json.Unmarshal(data, &values) + if parseError != nil { + return fmt.Errorf("failed to parse config file %q: %v", configFilename, parseError) + } + + var config Config + unmarshalError := json.Unmarshal(data, &config) + if unmarshalError != nil { + return fmt.Errorf("failed to parse config file %q: %v", configFilename, unmarshalError) + } + + c.Merge(config, values) + + c.TempFolder = c.CleanPath(c.TempFolder) + tempDirError := os.MkdirAll(c.TempFolder, 0700) + if tempDirError != nil { + return fmt.Errorf("failed to create temp dir %q: %v", c.TempFolder, tempDirError) + } + + c.OutputFolder = 
c.CleanPath(c.OutputFolder) + outDirError := os.MkdirAll(c.OutputFolder, 0700) + if outDirError != nil { + return fmt.Errorf("failed to create output dir %q: %v", c.OutputFolder, outDirError) + } + + c.AppFolder = c.CleanPath(c.AppFolder) + appDirError := c.checkDir(c.AppFolder, "app") + if appDirError != nil { + return appDirError + } + + c.BinFolder = c.CleanPath(c.BinFolder) + binDirError := c.checkDir(c.BinFolder, "bin") + if binDirError != nil { + return binDirError + } + + c.DataFolder = c.CleanPath(c.DataFolder) + dataDirError := c.checkDir(c.DataFolder, "data") + if dataDirError != nil { + return dataDirError + } + + return c.CheckServerFolder() +} + +func (c *Config) CheckServerFolder() error { + if c.ServerMode { + c.ServerFolder = c.CleanPath(c.ServerFolder) + serverDirError := c.checkDir(c.ServerFolder, "server") + if serverDirError != nil { + return serverDirError + } + + keyDirError := os.MkdirAll(filepath.Join(c.ServerFolder, c.KeyFolder), 0700) + if keyDirError != nil { + return fmt.Errorf("failed to create key dir %q: %v", filepath.Join(c.ServerFolder, c.KeyFolder), keyDirError) + } + } + + return nil +} + +func (c *Config) Merge(config Config, values map[string]any) { + for key := range values { + switch strings.ToLower(key) { + case strings.ToLower("Verbose"): + c.Verbose = config.Verbose + break + + case strings.ToLower("AppFolder"): + c.AppFolder = config.AppFolder + break + + case strings.ToLower("BinFolder"): + c.BinFolder = config.BinFolder + break + + case strings.ToLower("DataFolder"): + c.DataFolder = config.DataFolder + break + + case strings.ToLower("OutputFolder"): + c.OutputFolder = config.OutputFolder + break + + case strings.ToLower("ServerFolder"): + c.ServerFolder = config.ServerFolder + break + + case strings.ToLower("TempFolder"): + c.TempFolder = config.TempFolder + break + + case strings.ToLower("KeyFolder"): + c.KeyFolder = config.KeyFolder + break + + case strings.ToLower("InputFile"): + c.InputFile = config.InputFile + 
break + + case strings.ToLower("DataFlowDiagramFilenamePNG"): + c.DataFlowDiagramFilenamePNG = config.DataFlowDiagramFilenamePNG + break + + case strings.ToLower("DataAssetDiagramFilenamePNG"): + c.DataAssetDiagramFilenamePNG = config.DataAssetDiagramFilenamePNG + break + + case strings.ToLower("DataFlowDiagramFilenameDOT"): + c.DataFlowDiagramFilenameDOT = config.DataFlowDiagramFilenameDOT + break + + case strings.ToLower("DataAssetDiagramFilenameDOT"): + c.DataAssetDiagramFilenameDOT = config.DataAssetDiagramFilenameDOT + break + + case strings.ToLower("ReportFilename"): + c.ReportFilename = config.ReportFilename + break + + case strings.ToLower("ExcelRisksFilename"): + c.ExcelRisksFilename = config.ExcelRisksFilename + break + + case strings.ToLower("ExcelTagsFilename"): + c.ExcelTagsFilename = config.ExcelTagsFilename + break + + case strings.ToLower("JsonRisksFilename"): + c.JsonRisksFilename = config.JsonRisksFilename + break + + case strings.ToLower("JsonTechnicalAssetsFilename"): + c.JsonTechnicalAssetsFilename = config.JsonTechnicalAssetsFilename + break + + case strings.ToLower("JsonStatsFilename"): + c.JsonStatsFilename = config.JsonStatsFilename + break + + case strings.ToLower("TemplateFilename"): + c.TemplateFilename = config.TemplateFilename + break + + case strings.ToLower("RAAPlugin"): + c.RAAPlugin = config.RAAPlugin + break + + case strings.ToLower("RiskRulesPlugins"): + c.RiskRulesPlugins = config.RiskRulesPlugins + break + + case strings.ToLower("SkipRiskRules"): + c.SkipRiskRules = config.SkipRiskRules + break + + case strings.ToLower("ExecuteModelMacro"): + c.ExecuteModelMacro = config.ExecuteModelMacro + break + + case strings.ToLower("DiagramDPI"): + c.DiagramDPI = config.DiagramDPI + break + + case strings.ToLower("ServerPort"): + c.ServerPort = config.ServerPort + break + + case strings.ToLower("GraphvizDPI"): + c.GraphvizDPI = config.GraphvizDPI + break + + case strings.ToLower("MaxGraphvizDPI"): + c.MaxGraphvizDPI = 
config.MaxGraphvizDPI + break + + case strings.ToLower("BackupHistoryFilesToKeep"): + c.BackupHistoryFilesToKeep = config.BackupHistoryFilesToKeep + break + + case strings.ToLower("AddModelTitle"): + c.AddModelTitle = config.AddModelTitle + break + + case strings.ToLower("KeepDiagramSourceFiles"): + c.KeepDiagramSourceFiles = config.KeepDiagramSourceFiles + break + + case strings.ToLower("IgnoreOrphanedRiskTracking"): + c.IgnoreOrphanedRiskTracking = config.IgnoreOrphanedRiskTracking + break + + case strings.ToLower("Attractiveness"): + c.Attractiveness = config.Attractiveness + break + } + } +} + +func (c *Config) CleanPath(path string) string { + return filepath.Clean(c.ExpandPath(path)) +} + +func (c *Config) checkDir(dir string, name string) error { + dirInfo, dirError := os.Stat(dir) + if dirError != nil { + return fmt.Errorf("%v folder %q not good: %v", name, dir, dirError) + } + + if !dirInfo.IsDir() { + return fmt.Errorf("%v folder %q is not a folder", name, dir) + } + + return nil +} + +func (c *Config) ExpandPath(path string) string { + home := c.UserHomeDir() + if strings.HasPrefix(path, "~") { + path = strings.Replace(path, "~", home, 1) + } + + if strings.HasPrefix(path, "$HOME") { + path = strings.Replace(path, "$HOME", home, -1) + } + + return path +} + +func (c *Config) UserHomeDir() string { + switch runtime.GOOS { + case "windows": + home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + return home + + default: + return os.Getenv("HOME") + } +} diff --git a/pkg/common/consts.go b/pkg/common/consts.go new file mode 100644 index 00000000..f2b833a2 --- /dev/null +++ b/pkg/common/consts.go @@ -0,0 +1,50 @@ +package common + +const ( + TempDir = "/dev/shm" // TODO: make configurable via cmdline arg? + AppDir = "/app" + BinDir = "/app" + DataDir = "/data" + OutputDir = "." 
+ ServerDir = "/server" + KeyDir = "keys" + + DefaultServerPort = 8080 + + InputFile = "threagile.yaml" + ReportFilename = "report.pdf" + ExcelRisksFilename = "risks.xlsx" + ExcelTagsFilename = "tags.xlsx" + JsonRisksFilename = "risks.json" + JsonTechnicalAssetsFilename = "technical-assets.json" + JsonStatsFilename = "stats.json" + TemplateFilename = "background.pdf" + DataFlowDiagramFilenameDOT = "data-flow-diagram.gv" + DataFlowDiagramFilenamePNG = "data-flow-diagram.png" + DataAssetDiagramFilenameDOT = "data-asset-diagram.gv" + DataAssetDiagramFilenamePNG = "data-asset-diagram.png" + + RAAPluginName = "raa_calc" + + DefaultGraphvizDPI = 120 + MinGraphvizDPI = 20 + MaxGraphvizDPI = 300 + DefaultBackupHistoryFilesToKeep = 50 +) + +const ( + QuitCommand = "quit" + AnalyzeModelCommand = "analyze-model" + CreateExampleModelCommand = "create-example-model" + CreateStubModelCommand = "create-stub-model" + CreateEditingSupportCommand = "create-editing-support" + PrintVersionCommand = "version" + ListTypesCommand = "list-types" + ListRiskRulesCommand = "list-risk-rules" + ListModelMacrosCommand = "list-model-macros" + ExplainTypesCommand = "explain-types" + ExplainRiskRulesCommand = "explain-risk-rules" + ExplainModelMacrosCommand = "explain-model-macros" + Print3rdPartyCommand = "print-3rd-party-licenses" + PrintLicenseCommand = "print-license" +) diff --git a/pkg/common/plugin-input.go b/pkg/common/plugin-input.go new file mode 100644 index 00000000..3a3cfb5f --- /dev/null +++ b/pkg/common/plugin-input.go @@ -0,0 +1,10 @@ +package common + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type PluginInput struct { + Config + types.ParsedModel +} diff --git a/pkg/common/progress-reporter.go b/pkg/common/progress-reporter.go new file mode 100644 index 00000000..5e61ad94 --- /dev/null +++ b/pkg/common/progress-reporter.go @@ -0,0 +1,33 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package common + +import ( + "fmt" + "log" +) + +type 
DefaultProgressReporter struct { + Verbose bool + SuppressError bool +} + +func (r DefaultProgressReporter) Info(a ...any) { + if r.Verbose { + fmt.Println(a...) + } +} + +func (DefaultProgressReporter) Warn(a ...any) { + fmt.Println(a...) +} + +func (r DefaultProgressReporter) Error(v ...any) { + if r.SuppressError { + r.Warn(v...) + return + } + log.Fatal(v...) +} diff --git a/pkg/docs/constants.go b/pkg/docs/constants.go new file mode 100644 index 00000000..32225cb7 --- /dev/null +++ b/pkg/docs/constants.go @@ -0,0 +1,46 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package docs + +import "github.com/threagile/threagile/pkg/common" + +const ( + ThreagileVersion = "1.0.0" // Also update into example and stub model files and openapi.yaml + Logo = " _____ _ _ _ \n |_ _| |__ _ __ ___ __ _ __ _(_) | ___ \n | | | '_ \\| '__/ _ \\/ _` |/ _` | | |/ _ \\\n | | | | | | | | __/ (_| | (_| | | | __/\n |_| |_| |_|_| \\___|\\__,_|\\__, |_|_|\\___|\n |___/ " + + "\nThreagile - Agile Threat Modeling" + VersionText = "Documentation: https://threagile.io\n" + + "Docker Images: https://hub.docker.com/r/threagile/threagile\n" + + "Sourcecode: https://github.com/threagile\n" + + "License: Open-Source (MIT License)" + + "Version: " + ThreagileVersion + " (%v)" + Examples = "Examples:\n\n" + + "If you want to create an example model (via docker) as a starting point to learn about Threagile just run: \n" + + " docker run --rm -it -v \"$(pwd)\":app/work threagile/threagile " + common.CreateExampleModelCommand + " -output app/work \n\n" + + "If you want to create a minimal stub model (via docker) as a starting point for your own model just run: \n" + + " docker run --rm -it -v \"$(pwd)\":app/work threagile/threagile " + common.CreateStubModelCommand + " -output app/work \n\n" + + "If you want to execute Threagile on a model yaml file (via docker): \n" + + " docker run --rm -it -v \"$(pwd)\":app/work threagile/threagile -verbose -model -output app/work \n\n" + + "If you want to run Threagile 
as a server (REST API) on some port (here 8080): \n" + + " docker run --rm -it --shm-size=256m -p 8080:8080 --name --mount 'type=volume,src=threagile-storage,dst=/data,readonly=false' threagile/threagile server --server-port 8080 \n\n" + + "If you want to find out about the different enum values usable in the model yaml file: \n" + + " docker run --rm -it threagile/threagile " + common.ListTypesCommand + "\n\n" + + "If you want to use some nice editing help (syntax validation, autocompletion, and live templates) in your favourite IDE: " + + " docker run --rm -it -v \"$(pwd)\":app/work threagile/threagile " + common.CreateEditingSupportCommand + " -output app/work\n\n" + + "If you want to list all available model macros (which are macros capable of reading a model yaml file, asking you questions in a wizard-style and then update the model yaml file accordingly): \n" + + " docker run --rm -it threagile/threagile " + common.ListModelMacrosCommand + " \n\n" + + "If you want to execute a certain model macro on the model yaml file (here the macro add-build-pipeline): \n" + + " docker run --rm -it -v \"$(pwd)\":app/work threagile/threagile -model app/work/threagile.yaml -output app/work execute-model-macro add-build-pipeline" + ThirdPartyLicenses = " - golang (Google Go License): https://golang.org/LICENSE\n" + + " - go-yaml (MIT License): https://github.com/go-yaml/yaml/blob/v3/LICENSE\n" + + " - graphviz (CPL License): https://graphviz.gitlab.io/license/\n" + + " - gofpdf (MIT License): https://github.com/jung-kurt/gofpdf/blob/master/LICENSE\n" + + " - go-chart (MIT License): https://github.com/wcharczuk/go-chart/blob/master/LICENSE\n" + + " - excelize (BSD License): https://github.com/qax-os/excelize/blob/master/LICENSE\n" + + " - graphics-go (BSD License): https://github.com/BurntSushi/graphics-go/blob/master/LICENSE\n" + + " - google-uuid (BSD License): https://github.com/google/uuid/blob/master/LICENSE\n" + + " - gin-gonic (MIT License): 
https://github.com/gin-gonic/gin/blob/master/LICENSE\n" + + " - swagger-ui (Apache License): https://swagger.io/license/\n" + + " - cobra-cli (Apache License): https://github.com/spf13/cobra-cli/blob/main/LICENSE.txt\n" +) diff --git a/pkg/examples/examples.go b/pkg/examples/examples.go new file mode 100644 index 00000000..c335ee2d --- /dev/null +++ b/pkg/examples/examples.go @@ -0,0 +1,75 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package examples + +import ( + "fmt" + "github.com/threagile/threagile/pkg/common" + "io" + "os" + "path/filepath" +) + +func CreateExampleModelFile(appFolder, outputDir string) error { + _, err := copyFile(filepath.Join(appFolder, "threagile-example-model.yaml"), filepath.Join(outputDir, "threagile-example-model.yaml")) + if err == nil { + return nil + } + + _, altError := copyFile(filepath.Join(appFolder, common.InputFile), filepath.Join(outputDir, "threagile-example-model.yaml")) + if altError != nil { + return err + } + + return nil +} + +func CreateStubModelFile(appFolder, outputDir string) error { + _, err := copyFile(filepath.Join(appFolder, "threagile-stub-model.yaml"), filepath.Join(outputDir, "threagile-stub-model.yaml")) + if err == nil { + return nil + } + + _, altError := copyFile(filepath.Join(appFolder, common.InputFile), filepath.Join(outputDir, "threagile-stub-model.yaml")) + if altError != nil { + return err + } + + return nil +} + +func CreateEditingSupportFiles(appFolder, outputDir string) error { + _, schemaError := copyFile(filepath.Join(appFolder, "schema.json"), filepath.Join(outputDir, "schema.json")) + if schemaError != nil { + return schemaError + } + + _, templateError := copyFile(filepath.Join(appFolder, "live-templates.txt"), filepath.Join(outputDir, "live-templates.txt")) + return templateError +} + +func copyFile(src, dst string) (int64, error) { + sourceFileStat, err := os.Stat(src) + if err != nil { + return 0, err + } + + if !sourceFileStat.Mode().IsRegular() { + return 0, fmt.Errorf("%s is not a regular 
file", src) + } + + source, err := os.Open(filepath.Clean(src)) + if err != nil { + return 0, err + } + defer func() { _ = source.Close() }() + + destination, err := os.Create(filepath.Clean(dst)) + if err != nil { + return 0, err + } + defer func() { _ = destination.Close() }() + nBytes, err := io.Copy(destination, source) + return nBytes, err +} diff --git a/pkg/input/author.go b/pkg/input/author.go new file mode 100644 index 00000000..834fc347 --- /dev/null +++ b/pkg/input/author.go @@ -0,0 +1,68 @@ +package input + +import ( + "fmt" + "sort" + "strings" +) + +type Author struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Contact string `yaml:"contact,omitempty" json:"contact,omitempty"` + Homepage string `yaml:"homepage,omitempty" json:"homepage,omitempty"` +} + +func (what *Author) Merge(other Author) error { + if len(what.Name) > 0 && !strings.EqualFold(what.Name, other.Name) { + return fmt.Errorf("author name mismatch") + } + + if len(what.Contact) > 0 && !strings.EqualFold(what.Contact, other.Contact) { + return fmt.Errorf("author contact mismatch") + } + + if len(what.Homepage) > 0 && !strings.EqualFold(what.Homepage, other.Homepage) { + return fmt.Errorf("author homepage mismatch") + } + + what.Name = other.Name + what.Contact = other.Contact + what.Homepage = other.Homepage + + return nil +} + +func (what *Author) MergeList(list []Author) ([]Author, error) { + sort.Slice(list, func(i int, j int) bool { + return strings.Compare(list[i].Name, list[j].Name) < 0 + }) + + if len(list) < 2 { + return list, nil + } + + first := list[0] + tail, mergeError := what.MergeList(list[1:]) + if mergeError != nil { + return nil, mergeError + } + + newList := make([]Author, 1) + for _, second := range tail { + if first.Match(second) { + mergeError = first.Merge(second) + if mergeError != nil { + return nil, mergeError + } + } else { + newList = append(newList, second) + } + } + + newList[0] = first + return newList, nil +} + +func (what *Author) 
Match(other Author) bool { + return strings.EqualFold(what.Name, other.Name) +} diff --git a/pkg/input/communication-link.go b/pkg/input/communication-link.go new file mode 100644 index 00000000..01070fde --- /dev/null +++ b/pkg/input/communication-link.go @@ -0,0 +1,99 @@ +package input + +import "fmt" + +type CommunicationLink struct { + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` + Authentication string `yaml:"authentication,omitempty" json:"authentication,omitempty"` + Authorization string `yaml:"authorization,omitempty" json:"authorization,omitempty"` + Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` + VPN bool `yaml:"vpn,omitempty" json:"vpn,omitempty"` + IpFiltered bool `yaml:"ip_filtered,omitempty" json:"ip_filtered,omitempty"` + Readonly bool `yaml:"readonly,omitempty" json:"readonly,omitempty"` + Usage string `yaml:"usage,omitempty" json:"usage,omitempty"` + DataAssetsSent []string `yaml:"data_assets_sent,omitempty" json:"data_assets_sent,omitempty"` + DataAssetsReceived []string `yaml:"data_assets_received,omitempty" json:"data_assets_received,omitempty"` + DiagramTweakWeight int `yaml:"diagram_tweak_weight,omitempty" json:"diagram_tweak_weight,omitempty"` + DiagramTweakConstraint bool `yaml:"diagram_tweak_constraint,omitempty" json:"diagram_tweak_constraint,omitempty"` +} + +func (what *CommunicationLink) Merge(other CommunicationLink) error { + var mergeError error + what.Target, mergeError = new(Strings).MergeSingleton(what.Target, other.Target) + if mergeError != nil { + return fmt.Errorf("failed to merge target: %v", mergeError) + } + + what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description) + if mergeError != nil { + return fmt.Errorf("failed to merge description: %v", mergeError) + } + + what.Protocol, mergeError = 
new(Strings).MergeSingleton(what.Protocol, other.Protocol) + if mergeError != nil { + return fmt.Errorf("failed to merge protocol: %v", mergeError) + } + + what.Authentication, mergeError = new(Strings).MergeSingleton(what.Authentication, other.Authentication) + if mergeError != nil { + return fmt.Errorf("failed to merge authentication: %v", mergeError) + } + + what.Authorization, mergeError = new(Strings).MergeSingleton(what.Authorization, other.Authorization) + if mergeError != nil { + return fmt.Errorf("failed to merge authorization: %v", mergeError) + } + + what.Tags = new(Strings).MergeUniqueSlice(what.Tags, other.Tags) + + if what.VPN == false { + what.VPN = other.VPN + } + + if what.IpFiltered == false { + what.IpFiltered = other.IpFiltered + } + + if what.Readonly == false { + what.Readonly = other.Readonly + } + + what.Usage, mergeError = new(Strings).MergeSingleton(what.Usage, other.Usage) + if mergeError != nil { + return fmt.Errorf("failed to merge usage: %v", mergeError) + } + + what.DataAssetsSent = new(Strings).MergeUniqueSlice(what.DataAssetsSent, other.DataAssetsSent) + + what.DataAssetsReceived = new(Strings).MergeUniqueSlice(what.DataAssetsReceived, other.DataAssetsReceived) + + if what.DiagramTweakWeight == 0 { + what.DiagramTweakWeight = other.DiagramTweakWeight + } + + if what.DiagramTweakConstraint == false { + what.DiagramTweakConstraint = other.DiagramTweakConstraint + } + + return nil +} + +func (what *CommunicationLink) MergeMap(first map[string]CommunicationLink, second map[string]CommunicationLink) (map[string]CommunicationLink, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge communication link %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + first[mapKey] = mapValue + } + } + + return first, nil +} diff --git a/pkg/input/data-asset.go b/pkg/input/data-asset.go 
new file mode 100644 index 00000000..e686f390 --- /dev/null +++ b/pkg/input/data-asset.go @@ -0,0 +1,89 @@ +package input + +import "fmt" + +type DataAsset struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Usage string `yaml:"usage,omitempty" json:"usage,omitempty"` + Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` + Origin string `yaml:"origin,omitempty" json:"origin,omitempty"` + Owner string `yaml:"owner,omitempty" json:"owner,omitempty"` + Quantity string `yaml:"quantity,omitempty" json:"quantity,omitempty"` + Confidentiality string `yaml:"confidentiality,omitempty" json:"confidentiality,omitempty"` + Integrity string `yaml:"integrity,omitempty" json:"integrity,omitempty"` + Availability string `yaml:"availability,omitempty" json:"availability,omitempty"` + JustificationCiaRating string `yaml:"justification_cia_rating,omitempty" json:"justification_cia_rating,omitempty"` +} + +func (what *DataAsset) Merge(other DataAsset) error { + var mergeError error + what.ID, mergeError = new(Strings).MergeSingleton(what.ID, other.ID) + if mergeError != nil { + return fmt.Errorf("failed to merge id: %v", mergeError) + } + + what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description) + if mergeError != nil { + return fmt.Errorf("failed to merge description: %v", mergeError) + } + + what.Usage, mergeError = new(Strings).MergeSingleton(what.Usage, other.Usage) + if mergeError != nil { + return fmt.Errorf("failed to merge usage: %v", mergeError) + } + + what.Tags = new(Strings).MergeUniqueSlice(what.Tags, other.Tags) + + what.Origin, mergeError = new(Strings).MergeSingleton(what.Origin, other.Origin) + if mergeError != nil { + return fmt.Errorf("failed to merge origin: %v", mergeError) + } + + what.Owner, mergeError = new(Strings).MergeSingleton(what.Owner, other.Owner) + if mergeError != nil { + return fmt.Errorf("failed to merge 
owner: %v", mergeError) + } + + what.Quantity, mergeError = new(Strings).MergeSingleton(what.Quantity, other.Quantity) + if mergeError != nil { + return fmt.Errorf("failed to merge quantity: %v", mergeError) + } + + what.Confidentiality, mergeError = new(Strings).MergeSingleton(what.Confidentiality, other.Confidentiality) + if mergeError != nil { + return fmt.Errorf("failed to merge confidentiality: %v", mergeError) + } + + what.Integrity, mergeError = new(Strings).MergeSingleton(what.Integrity, other.Integrity) + if mergeError != nil { + return fmt.Errorf("failed to merge integrity: %v", mergeError) + } + + what.Availability, mergeError = new(Strings).MergeSingleton(what.Availability, other.Availability) + if mergeError != nil { + return fmt.Errorf("failed to merge availability: %v", mergeError) + } + + what.JustificationCiaRating = new(Strings).MergeMultiline(what.JustificationCiaRating, other.JustificationCiaRating) + + return nil +} + +func (what *DataAsset) MergeMap(first map[string]DataAsset, second map[string]DataAsset) (map[string]DataAsset, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge data asset %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + first[mapKey] = mapValue + } + } + + return first, nil +} diff --git a/pkg/input/model.go b/pkg/input/model.go new file mode 100644 index 00000000..2d6db1c2 --- /dev/null +++ b/pkg/input/model.go @@ -0,0 +1,303 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package input + +import ( + "fmt" + "github.com/mpvl/unique" + "log" + "os" + "path/filepath" + "slices" + "sort" + "strings" + + "gopkg.in/yaml.v3" +) + +// === Model Type Stuff ====================================== + +type Model struct { // TODO: Eventually remove this and directly use ParsedModelRoot? But then the error messages for model errors are not quite as good anymore... 
+ ThreagileVersion string `yaml:"threagile_version,omitempty" json:"threagile_version,omitempty"` + Includes []string `yaml:"includes,omitempty" json:"includes,omitempty"` + Title string `yaml:"title,omitempty" json:"title,omitempty"` + Author Author `yaml:"author,omitempty" json:"author,omitempty"` + Contributors []Author `yaml:"contributors,omitempty" json:"contributors,omitempty"` + Date string `yaml:"date,omitempty" json:"date,omitempty"` + AppDescription Overview `yaml:"application_description,omitempty" json:"application_description,omitempty"` + BusinessOverview Overview `yaml:"business_overview,omitempty" json:"business_overview,omitempty"` + TechnicalOverview Overview `yaml:"technical_overview,omitempty" json:"technical_overview,omitempty"` + BusinessCriticality string `yaml:"business_criticality,omitempty" json:"business_criticality,omitempty"` + ManagementSummaryComment string `yaml:"management_summary_comment,omitempty" json:"management_summary_comment,omitempty"` + SecurityRequirements map[string]string `yaml:"security_requirements,omitempty" json:"security_requirements,omitempty"` + Questions map[string]string `yaml:"questions,omitempty" json:"questions,omitempty"` + AbuseCases map[string]string `yaml:"abuse_cases,omitempty" json:"abuse_cases,omitempty"` + TagsAvailable []string `yaml:"tags_available,omitempty" json:"tags_available,omitempty"` + DataAssets map[string]DataAsset `yaml:"data_assets,omitempty" json:"data_assets,omitempty"` + TechnicalAssets map[string]TechnicalAsset `yaml:"technical_assets,omitempty" json:"technical_assets,omitempty"` + TrustBoundaries map[string]TrustBoundary `yaml:"trust_boundaries,omitempty" json:"trust_boundaries,omitempty"` + SharedRuntimes map[string]SharedRuntime `yaml:"shared_runtimes,omitempty" json:"shared_runtimes,omitempty"` + IndividualRiskCategories map[string]IndividualRiskCategory `yaml:"individual_risk_categories,omitempty" json:"individual_risk_categories,omitempty"` + RiskTracking 
map[string]RiskTracking `yaml:"risk_tracking,omitempty" json:"risk_tracking,omitempty"` + DiagramTweakNodesep int `yaml:"diagram_tweak_nodesep,omitempty" json:"diagram_tweak_nodesep,omitempty"` + DiagramTweakRanksep int `yaml:"diagram_tweak_ranksep,omitempty" json:"diagram_tweak_ranksep,omitempty"` + DiagramTweakEdgeLayout string `yaml:"diagram_tweak_edge_layout,omitempty" json:"diagram_tweak_edge_layout,omitempty"` + DiagramTweakSuppressEdgeLabels bool `yaml:"diagram_tweak_suppress_edge_labels,omitempty" json:"diagram_tweak_suppress_edge_labels,omitempty"` + DiagramTweakLayoutLeftToRight bool `yaml:"diagram_tweak_layout_left_to_right,omitempty" json:"diagram_tweak_layout_left_to_right,omitempty"` + DiagramTweakInvisibleConnectionsBetweenAssets []string `yaml:"diagram_tweak_invisible_connections_between_assets,omitempty" json:"diagram_tweak_invisible_connections_between_assets,omitempty"` + DiagramTweakSameRankAssets []string `yaml:"diagram_tweak_same_rank_assets,omitempty" json:"diagram_tweak_same_rank_assets,omitempty"` +} + +func (model *Model) Defaults() *Model { + *model = Model{ + Questions: make(map[string]string), + AbuseCases: make(map[string]string), + SecurityRequirements: make(map[string]string), + DataAssets: make(map[string]DataAsset), + TechnicalAssets: make(map[string]TechnicalAsset), + TrustBoundaries: make(map[string]TrustBoundary), + SharedRuntimes: make(map[string]SharedRuntime), + IndividualRiskCategories: make(map[string]IndividualRiskCategory), + RiskTracking: make(map[string]RiskTracking), + } + + return model +} + +func (model *Model) Load(inputFilename string) error { + modelYaml, readError := os.ReadFile(filepath.Clean(inputFilename)) + if readError != nil { + log.Fatal("Unable to read model file: ", readError) + } + + unmarshalError := yaml.Unmarshal(modelYaml, &model) + if unmarshalError != nil { + log.Fatal("Unable to parse model yaml: ", unmarshalError) + } + + for _, includeFile := range model.Includes { + mergeError := 
model.Merge(filepath.Dir(inputFilename), includeFile) + if mergeError != nil { + log.Fatalf("Unable to merge model include %q: %v", includeFile, mergeError) + } + } + + return nil +} + +func (model *Model) Merge(dir string, includeFilename string) error { + modelYaml, readError := os.ReadFile(filepath.Clean(filepath.Join(dir, includeFilename))) + if readError != nil { + return fmt.Errorf("unable to read model file: %v", readError) + } + + var fileStructure map[string]any + unmarshalStructureError := yaml.Unmarshal(modelYaml, &fileStructure) + if unmarshalStructureError != nil { + return fmt.Errorf("unable to parse model structure: %v", unmarshalStructureError) + } + + var includedModel Model + unmarshalError := yaml.Unmarshal(modelYaml, &includedModel) + if unmarshalError != nil { + return fmt.Errorf("unable to parse model yaml: %v", unmarshalError) + } + + var mergeError error + for item := range fileStructure { + switch strings.ToLower(item) { + case strings.ToLower("includes"): + for _, includeFile := range includedModel.Includes { + mergeError = model.Merge(filepath.Join(dir, filepath.Dir(includeFilename)), includeFile) + if mergeError != nil { + return fmt.Errorf("failed to merge model include %q: %v", includeFile, mergeError) + } + } + break + + case strings.ToLower("threagile_version"): + model.ThreagileVersion, mergeError = new(Strings).MergeSingleton(model.ThreagileVersion, includedModel.ThreagileVersion) + if mergeError != nil { + return fmt.Errorf("failed to merge threagile version: %v", mergeError) + } + break + + case strings.ToLower("title"): + model.Title, mergeError = new(Strings).MergeSingleton(model.Title, includedModel.Title) + if mergeError != nil { + return fmt.Errorf("failed to merge title: %v", mergeError) + } + break + + case strings.ToLower("author"): + mergeError = model.Author.Merge(includedModel.Author) + if mergeError != nil { + return fmt.Errorf("failed to merge author: %v", mergeError) + } + break + + case 
strings.ToLower("contributors"): + model.Contributors, mergeError = new(Author).MergeList(append(model.Contributors, includedModel.Contributors...)) + if mergeError != nil { + return fmt.Errorf("failed to merge contributors: %v", mergeError) + } + break + + case strings.ToLower("date"): + model.Date, mergeError = new(Strings).MergeSingleton(model.Date, includedModel.Date) + if mergeError != nil { + return fmt.Errorf("failed to merge date: %v", mergeError) + } + break + + case strings.ToLower("application_description"): + mergeError = model.AppDescription.Merge(includedModel.AppDescription) + if mergeError != nil { + return fmt.Errorf("failed to merge application description: %v", mergeError) + } + break + + case strings.ToLower("business_overview"): + mergeError = model.BusinessOverview.Merge(includedModel.BusinessOverview) + if mergeError != nil { + return fmt.Errorf("failed to merge business overview: %v", mergeError) + } + break + + case strings.ToLower("technical_overview"): + mergeError = model.TechnicalOverview.Merge(includedModel.TechnicalOverview) + if mergeError != nil { + return fmt.Errorf("failed to merge technical overview: %v", mergeError) + } + break + + case strings.ToLower("business_criticality"): + model.BusinessCriticality, mergeError = new(Strings).MergeSingleton(model.BusinessCriticality, includedModel.BusinessCriticality) + if mergeError != nil { + return fmt.Errorf("failed to merge business criticality: %v", mergeError) + } + break + + case strings.ToLower("management_summary_comment"): + model.ManagementSummaryComment = new(Strings).MergeMultiline(model.ManagementSummaryComment, includedModel.ManagementSummaryComment) + break + + case strings.ToLower("security_requirements"): + model.SecurityRequirements, mergeError = new(Strings).MergeMap(model.SecurityRequirements, includedModel.SecurityRequirements) + if mergeError != nil { + return fmt.Errorf("failed to merge security requirements: %v", mergeError) + } + break + + case 
strings.ToLower("questions"): + model.Questions, mergeError = new(Strings).MergeMap(model.Questions, includedModel.Questions) + if mergeError != nil { + return fmt.Errorf("failed to merge questions: %v", mergeError) + } + break + + case strings.ToLower("abuse_cases"): + model.AbuseCases, mergeError = new(Strings).MergeMap(model.AbuseCases, includedModel.AbuseCases) + if mergeError != nil { + return fmt.Errorf("failed to merge abuse cases: %v", mergeError) + } + break + + case strings.ToLower("tags_available"): + model.TagsAvailable = new(Strings).MergeUniqueSlice(model.TagsAvailable, includedModel.TagsAvailable) + break + + case strings.ToLower("data_assets"): + model.DataAssets, mergeError = new(DataAsset).MergeMap(model.DataAssets, includedModel.DataAssets) + if mergeError != nil { + return fmt.Errorf("failed to merge data assets: %v", mergeError) + } + break + + case strings.ToLower("technical_assets"): + model.TechnicalAssets, mergeError = new(TechnicalAsset).MergeMap(model.TechnicalAssets, includedModel.TechnicalAssets) + if mergeError != nil { + return fmt.Errorf("failed to merge technical assets: %v", mergeError) + } + break + + case strings.ToLower("trust_boundaries"): + model.TrustBoundaries, mergeError = new(TrustBoundary).MergeMap(model.TrustBoundaries, includedModel.TrustBoundaries) + if mergeError != nil { + return fmt.Errorf("failed to merge trust boundaries: %v", mergeError) + } + break + + case strings.ToLower("shared_runtimes"): + model.SharedRuntimes, mergeError = new(SharedRuntime).MergeMap(model.SharedRuntimes, includedModel.SharedRuntimes) + if mergeError != nil { + return fmt.Errorf("failed to merge shared runtimes: %v", mergeError) + } + break + + case strings.ToLower("individual_risk_categories"): + model.IndividualRiskCategories, mergeError = new(IndividualRiskCategory).MergeMap(model.IndividualRiskCategories, includedModel.IndividualRiskCategories) + if mergeError != nil { + return fmt.Errorf("failed to merge risk categories: %v", 
mergeError) + } + break + + case strings.ToLower("risk_tracking"): + model.RiskTracking, mergeError = new(RiskTracking).MergeMap(model.RiskTracking, includedModel.RiskTracking) + if mergeError != nil { + return fmt.Errorf("failed to merge risk tracking: %v", mergeError) + } + break + + case "diagram_tweak_nodesep": + model.DiagramTweakNodesep = includedModel.DiagramTweakNodesep + break + + case "diagram_tweak_ranksep": + model.DiagramTweakRanksep = includedModel.DiagramTweakRanksep + break + + case "diagram_tweak_edge_layout": + model.DiagramTweakEdgeLayout = includedModel.DiagramTweakEdgeLayout + break + + case "diagram_tweak_suppress_edge_labels": + model.DiagramTweakSuppressEdgeLabels = includedModel.DiagramTweakSuppressEdgeLabels + break + + case "diagram_tweak_layout_left_to_right": + model.DiagramTweakLayoutLeftToRight = includedModel.DiagramTweakLayoutLeftToRight + break + + case "diagram_tweak_invisible_connections_between_assets": + model.DiagramTweakInvisibleConnectionsBetweenAssets = append(model.DiagramTweakInvisibleConnectionsBetweenAssets, includedModel.DiagramTweakInvisibleConnectionsBetweenAssets...) + sort.Strings(model.DiagramTweakInvisibleConnectionsBetweenAssets) + unique.Strings(&model.DiagramTweakInvisibleConnectionsBetweenAssets) + break + + case "diagram_tweak_same_rank_assets": + model.DiagramTweakSameRankAssets = append(model.DiagramTweakSameRankAssets, includedModel.DiagramTweakSameRankAssets...) 
+ sort.Strings(model.DiagramTweakSameRankAssets) + unique.Strings(&model.DiagramTweakSameRankAssets) + } + } + + return nil +} + +func (model *Model) AddTagToModelInput(tag string, dryRun bool, changes *[]string) { + tag = NormalizeTag(tag) + + if !slices.Contains(model.TagsAvailable, tag) { + *changes = append(*changes, "adding tag: "+tag) + if !dryRun { + model.TagsAvailable = append(model.TagsAvailable, tag) + } + } +} + +func NormalizeTag(tag string) string { + return strings.TrimSpace(strings.ToLower(tag)) +} diff --git a/pkg/input/overview.go b/pkg/input/overview.go new file mode 100644 index 00000000..77820b28 --- /dev/null +++ b/pkg/input/overview.go @@ -0,0 +1,20 @@ +package input + +type Overview struct { + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Images []map[string]string `yaml:"images,omitempty" json:"images,omitempty"` // yes, array of map here, as array keeps the order of the image keys +} + +func (what *Overview) Merge(other Overview) error { + if len(what.Description) > 0 { + if len(other.Description) > 0 { + what.Description += lineSeparator + other.Description + } + } else { + what.Description = other.Description + } + + what.Images = append(what.Images, other.Images...) 
+ + return nil +} diff --git a/pkg/input/risk-category.go b/pkg/input/risk-category.go new file mode 100644 index 00000000..9d94047d --- /dev/null +++ b/pkg/input/risk-category.go @@ -0,0 +1,125 @@ +package input + +import ( + "fmt" +) + +type IndividualRiskCategory struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Impact string `yaml:"impact,omitempty" json:"impact,omitempty"` + ASVS string `yaml:"asvs,omitempty" json:"asvs,omitempty"` + CheatSheet string `yaml:"cheat_sheet,omitempty" json:"cheat_sheet,omitempty"` + Action string `yaml:"action,omitempty" json:"action,omitempty"` + Mitigation string `yaml:"mitigation,omitempty" json:"mitigation,omitempty"` + Check string `yaml:"check,omitempty" json:"check,omitempty"` + Function string `yaml:"function,omitempty" json:"function,omitempty"` + STRIDE string `yaml:"stride,omitempty" json:"stride,omitempty"` + DetectionLogic string `yaml:"detection_logic,omitempty" json:"detection_logic,omitempty"` + RiskAssessment string `yaml:"risk_assessment,omitempty" json:"risk_assessment,omitempty"` + FalsePositives string `yaml:"false_positives,omitempty" json:"false_positives,omitempty"` + ModelFailurePossibleReason bool `yaml:"model_failure_possible_reason,omitempty" json:"model_failure_possible_reason,omitempty"` + CWE int `yaml:"cwe,omitempty" json:"cwe,omitempty"` + RisksIdentified map[string]RiskIdentified `yaml:"risks_identified,omitempty" json:"risks_identified,omitempty"` +} + +func (what *IndividualRiskCategory) Merge(other IndividualRiskCategory) error { + var mergeError error + what.ID, mergeError = new(Strings).MergeSingleton(what.ID, other.ID) + if mergeError != nil { + return fmt.Errorf("failed to merge id: %v", mergeError) + } + + what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description) + if mergeError != nil { + return fmt.Errorf("failed to merge description: %v", mergeError) + } 
+ + what.Impact, mergeError = new(Strings).MergeSingleton(what.Impact, other.Impact) + if mergeError != nil { + return fmt.Errorf("failed to merge impact: %v", mergeError) + } + + what.ASVS, mergeError = new(Strings).MergeSingleton(what.ASVS, other.ASVS) + if mergeError != nil { + return fmt.Errorf("failed to merge asvs: %v", mergeError) + } + + what.CheatSheet, mergeError = new(Strings).MergeSingleton(what.CheatSheet, other.CheatSheet) + if mergeError != nil { + return fmt.Errorf("failed to merge cheat_sheet: %v", mergeError) + } + + what.Action, mergeError = new(Strings).MergeSingleton(what.Action, other.Action) + if mergeError != nil { + return fmt.Errorf("failed to merge action: %v", mergeError) + } + + what.Mitigation, mergeError = new(Strings).MergeSingleton(what.Mitigation, other.Mitigation) + if mergeError != nil { + return fmt.Errorf("failed to merge mitigation: %v", mergeError) + } + + what.Check, mergeError = new(Strings).MergeSingleton(what.Check, other.Check) + if mergeError != nil { + return fmt.Errorf("failed to merge check: %v", mergeError) + } + + what.Function, mergeError = new(Strings).MergeSingleton(what.Function, other.Function) + if mergeError != nil { + return fmt.Errorf("failed to merge function: %v", mergeError) + } + + what.STRIDE, mergeError = new(Strings).MergeSingleton(what.STRIDE, other.STRIDE) + if mergeError != nil { + return fmt.Errorf("failed to merge STRIDE: %v", mergeError) + } + + what.DetectionLogic, mergeError = new(Strings).MergeSingleton(what.DetectionLogic, other.DetectionLogic) + if mergeError != nil { + return fmt.Errorf("failed to merge detection_logic: %v", mergeError) + } + + what.RiskAssessment, mergeError = new(Strings).MergeSingleton(what.RiskAssessment, other.RiskAssessment) + if mergeError != nil { + return fmt.Errorf("failed to merge risk_assessment: %v", mergeError) + } + + what.FalsePositives, mergeError = new(Strings).MergeSingleton(what.FalsePositives, other.FalsePositives) + if mergeError != nil { + return 
fmt.Errorf("failed to merge false_positives: %v", mergeError) + } + + if what.ModelFailurePossibleReason == false { + what.ModelFailurePossibleReason = other.ModelFailurePossibleReason + } + + if what.CWE == 0 { + what.CWE = other.CWE + } + + what.RisksIdentified, mergeError = new(RiskIdentified).MergeMap(what.RisksIdentified, other.RisksIdentified) + if mergeError != nil { + return fmt.Errorf("failed to merge identified risks: %v", mergeError) + } + + return nil +} + +func (what *IndividualRiskCategory) MergeMap(first map[string]IndividualRiskCategory, second map[string]IndividualRiskCategory) (map[string]IndividualRiskCategory, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge risk category %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + first[mapKey] = mapValue + } + } + + return first, nil +} diff --git a/pkg/input/risk-tracking.go b/pkg/input/risk-tracking.go new file mode 100644 index 00000000..59946047 --- /dev/null +++ b/pkg/input/risk-tracking.go @@ -0,0 +1,59 @@ +package input + +import "fmt" + +type RiskTracking struct { + Status string `yaml:"status,omitempty" json:"status,omitempty"` + Justification string `yaml:"justification,omitempty" json:"justification,omitempty"` + Ticket string `yaml:"ticket,omitempty" json:"ticket,omitempty"` + Date string `yaml:"date,omitempty" json:"date,omitempty"` + CheckedBy string `yaml:"checked_by,omitempty" json:"checked_by,omitempty"` +} + +func (what *RiskTracking) Merge(other RiskTracking) error { + var mergeError error + what.Status, mergeError = new(Strings).MergeSingleton(what.Status, other.Status) + if mergeError != nil { + return fmt.Errorf("failed to merge status: %v", mergeError) + } + + what.Justification, mergeError = new(Strings).MergeSingleton(what.Justification, other.Justification) + if mergeError != nil { + return 
fmt.Errorf("failed to merge justification: %v", mergeError) + } + + what.Ticket, mergeError = new(Strings).MergeSingleton(what.Ticket, other.Ticket) + if mergeError != nil { + return fmt.Errorf("failed to merge ticket: %v", mergeError) + } + + what.Date, mergeError = new(Strings).MergeSingleton(what.Date, other.Date) + if mergeError != nil { + return fmt.Errorf("failed to merge date: %v", mergeError) + } + + what.CheckedBy, mergeError = new(Strings).MergeSingleton(what.CheckedBy, other.CheckedBy) + if mergeError != nil { + return fmt.Errorf("failed to merge checked_by: %v", mergeError) + } + + return nil +} + +func (what *RiskTracking) MergeMap(first map[string]RiskTracking, second map[string]RiskTracking) (map[string]RiskTracking, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge risk tracking %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + first[mapKey] = mapValue + } + } + + return first, nil +} diff --git a/pkg/input/risk.go b/pkg/input/risk.go new file mode 100644 index 00000000..d36e5174 --- /dev/null +++ b/pkg/input/risk.go @@ -0,0 +1,86 @@ +package input + +import "fmt" + +type RiskIdentified struct { + Severity string `yaml:"severity,omitempty" json:"severity,omitempty"` + ExploitationLikelihood string `yaml:"exploitation_likelihood,omitempty" json:"exploitation_likelihood,omitempty"` + ExploitationImpact string `yaml:"exploitation_impact,omitempty" json:"exploitation_impact,omitempty"` + DataBreachProbability string `yaml:"data_breach_probability,omitempty" json:"data_breach_probability,omitempty"` + DataBreachTechnicalAssets []string `yaml:"data_breach_technical_assets,omitempty" json:"data_breach_technical_assets,omitempty"` + MostRelevantDataAsset string `yaml:"most_relevant_data_asset,omitempty" json:"most_relevant_data_asset,omitempty"` + MostRelevantTechnicalAsset string 
`yaml:"most_relevant_technical_asset,omitempty" json:"most_relevant_technical_asset,omitempty"` + MostRelevantCommunicationLink string `yaml:"most_relevant_communication_link,omitempty" json:"most_relevant_communication_link,omitempty"` + MostRelevantTrustBoundary string `yaml:"most_relevant_trust_boundary,omitempty" json:"most_relevant_trust_boundary,omitempty"` + MostRelevantSharedRuntime string `yaml:"most_relevant_shared_runtime,omitempty" json:"most_relevant_shared_runtime,omitempty"` +} + +func (what *RiskIdentified) Merge(other RiskIdentified) error { + var mergeError error + what.Severity, mergeError = new(Strings).MergeSingleton(what.Severity, other.Severity) + if mergeError != nil { + return fmt.Errorf("failed to merge severity: %v", mergeError) + } + + what.ExploitationLikelihood, mergeError = new(Strings).MergeSingleton(what.ExploitationLikelihood, other.ExploitationLikelihood) + if mergeError != nil { + return fmt.Errorf("failed to merge exploitation_likelihood: %v", mergeError) + } + + what.ExploitationImpact, mergeError = new(Strings).MergeSingleton(what.ExploitationImpact, other.ExploitationImpact) + if mergeError != nil { + return fmt.Errorf("failed to merge exploitation_impact: %v", mergeError) + } + + what.DataBreachProbability, mergeError = new(Strings).MergeSingleton(what.DataBreachProbability, other.DataBreachProbability) + if mergeError != nil { + return fmt.Errorf("failed to merge data_breach_probability: %v", mergeError) + } + + what.DataBreachTechnicalAssets = new(Strings).MergeUniqueSlice(what.DataBreachTechnicalAssets, other.DataBreachTechnicalAssets) + + what.MostRelevantDataAsset, mergeError = new(Strings).MergeSingleton(what.MostRelevantDataAsset, other.MostRelevantDataAsset) + if mergeError != nil { + return fmt.Errorf("failed to merge most_relevant_data_asset: %v", mergeError) + } + + what.MostRelevantTechnicalAsset, mergeError = new(Strings).MergeSingleton(what.MostRelevantTechnicalAsset, other.MostRelevantTechnicalAsset) + if mergeError != nil { + 
return fmt.Errorf("failed to merge most_relevant_technical_asset: %v", mergeError) + } + + what.MostRelevantCommunicationLink, mergeError = new(Strings).MergeSingleton(what.MostRelevantCommunicationLink, other.MostRelevantCommunicationLink) + if mergeError != nil { + return fmt.Errorf("failed to merge most_relevant_communication_link: %v", mergeError) + } + + what.MostRelevantTrustBoundary, mergeError = new(Strings).MergeSingleton(what.MostRelevantTrustBoundary, other.MostRelevantTrustBoundary) + if mergeError != nil { + return fmt.Errorf("failed to merge most_relevant_trust_boundary: %v", mergeError) + } + + what.MostRelevantSharedRuntime, mergeError = new(Strings).MergeSingleton(what.MostRelevantSharedRuntime, other.MostRelevantSharedRuntime) + if mergeError != nil { + return fmt.Errorf("failed to merge most_relevant_shared_runtime: %v", mergeError) + } + + return nil +} + +func (what *RiskIdentified) MergeMap(first map[string]RiskIdentified, second map[string]RiskIdentified) (map[string]RiskIdentified, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge risk %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + first[mapKey] = mapValue + } + } + + return first, nil +} diff --git a/pkg/input/shared-runtime.go b/pkg/input/shared-runtime.go new file mode 100644 index 00000000..caa43c2e --- /dev/null +++ b/pkg/input/shared-runtime.go @@ -0,0 +1,47 @@ +package input + +import "fmt" + +type SharedRuntime struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Tags []string `yaml:"tags,omitempty" json:"tag,omitemptys"` + TechnicalAssetsRunning []string `yaml:"technical_assets_running,omitempty" json:"technical_assets_running,omitempty"` +} + +func (what *SharedRuntime) Merge(other SharedRuntime) error { + var 
mergeError error
	what.ID, mergeError = new(Strings).MergeSingleton(what.ID, other.ID)
	if mergeError != nil {
		return fmt.Errorf("failed to merge id: %v", mergeError)
	}

	what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description)
	if mergeError != nil {
		return fmt.Errorf("failed to merge description: %v", mergeError)
	}

	// Slice fields are unioned, preserving order of first occurrence.
	what.Tags = new(Strings).MergeUniqueSlice(what.Tags, other.Tags)

	what.TechnicalAssetsRunning = new(Strings).MergeUniqueSlice(what.TechnicalAssetsRunning, other.TechnicalAssetsRunning)

	return nil
}

// MergeMap folds the shared-runtime entries of second into first.
// Shared keys are merged via Merge; new keys are copied. first is mutated
// and returned.
func (what *SharedRuntime) MergeMap(first map[string]SharedRuntime, second map[string]SharedRuntime) (map[string]SharedRuntime, error) {
	for mapKey, mapValue := range second {
		mapItem, ok := first[mapKey]
		if ok {
			mergeError := mapItem.Merge(mapValue)
			if mergeError != nil {
				return first, fmt.Errorf("failed to merge shared runtime %q: %v", mapKey, mergeError)
			}

			first[mapKey] = mapItem
		} else {
			first[mapKey] = mapValue
		}
	}

	return first, nil
}
diff --git a/pkg/input/strings.go b/pkg/input/strings.go
new file mode 100644
index 00000000..d1dfeea3
--- /dev/null
+++ b/pkg/input/strings.go
@@ -0,0 +1,64 @@
package input

import (
	"fmt"
	"slices"
	"strings"
)

const (
	// lineSeparator is inserted between two differing multiline values by
	// Strings.MergeMultiline.
	// NOTE(review): the literal is split across this extraction — presumably
	// a blank-line separator ("\n\n"); confirm against the repository.
	lineSeparator = "\n

\n" +) + +type Strings struct { +} + +func (what *Strings) MergeSingleton(first string, second string) (string, error) { + if len(first) > 0 { + if len(second) > 0 { + if !strings.EqualFold(first, second) { + return first, fmt.Errorf("conflicting string values: %q versus %q", first, second) + } + } + + return first, nil + } + + return second, nil +} + +func (what *Strings) MergeMultiline(first string, second string) string { + text := first + if len(first) > 0 { + if len(second) > 0 && !strings.EqualFold(first, second) { + text = text + lineSeparator + second + } + } else { + text = second + } + + return text +} + +func (what *Strings) MergeMap(first map[string]string, second map[string]string) (map[string]string, error) { + for mapKey, mapValue := range second { + _, ok := first[mapKey] + if ok { + return nil, fmt.Errorf("duplicate item %q", mapKey) + } + + first[mapKey] = mapValue + } + + return first, nil +} + +func (what *Strings) MergeUniqueSlice(first []string, second []string) []string { + for _, item := range second { + if !slices.Contains(first, item) { + first = append(first, item) + } + } + + return first +} diff --git a/pkg/input/technical-asset.go b/pkg/input/technical-asset.go new file mode 100644 index 00000000..cfb9a7cf --- /dev/null +++ b/pkg/input/technical-asset.go @@ -0,0 +1,160 @@ +package input + +import "fmt" + +type TechnicalAsset struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Usage string `yaml:"usage,omitempty" json:"usage,omitempty"` + UsedAsClientByHuman bool `yaml:"used_as_client_by_human,omitempty" json:"used_as_client_by_human,omitempty"` + OutOfScope bool `yaml:"out_of_scope,omitempty" json:"out_of_scope,omitempty"` + JustificationOutOfScope string `yaml:"justification_out_of_scope,omitempty" json:"justification_out_of_scope,omitempty"` + Size string `yaml:"size,omitempty" 
json:"size,omitempty"`
	Technology             string                       `yaml:"technology,omitempty" json:"technology,omitempty"`
	Tags                   []string                     `yaml:"tags,omitempty" json:"tags,omitempty"`
	Internet               bool                         `yaml:"internet,omitempty" json:"internet,omitempty"`
	Machine                string                       `yaml:"machine,omitempty" json:"machine,omitempty"`
	Encryption             string                       `yaml:"encryption,omitempty" json:"encryption,omitempty"`
	Owner                  string                       `yaml:"owner,omitempty" json:"owner,omitempty"`
	Confidentiality        string                       `yaml:"confidentiality,omitempty" json:"confidentiality,omitempty"`
	Integrity              string                       `yaml:"integrity,omitempty" json:"integrity,omitempty"`
	Availability           string                       `yaml:"availability,omitempty" json:"availability,omitempty"`
	JustificationCiaRating string                       `yaml:"justification_cia_rating,omitempty" json:"justification_cia_rating,omitempty"`
	MultiTenant            bool                         `yaml:"multi_tenant,omitempty" json:"multi_tenant,omitempty"`
	Redundant              bool                         `yaml:"redundant,omitempty" json:"redundant,omitempty"`
	CustomDevelopedParts   bool                         `yaml:"custom_developed_parts,omitempty" json:"custom_developed_parts,omitempty"`
	DataAssetsProcessed    []string                     `yaml:"data_assets_processed,omitempty" json:"data_assets_processed,omitempty"`
	DataAssetsStored       []string                     `yaml:"data_assets_stored,omitempty" json:"data_assets_stored,omitempty"`
	DataFormatsAccepted    []string                     `yaml:"data_formats_accepted,omitempty" json:"data_formats_accepted,omitempty"`
	DiagramTweakOrder      int                          `yaml:"diagram_tweak_order,omitempty" json:"diagram_tweak_order,omitempty"`
	CommunicationLinks     map[string]CommunicationLink `yaml:"communication_links,omitempty" json:"communication_links,omitempty"`
}

// Merge folds other into what, field by field: singleton strings keep the
// existing value and error on conflicts, booleans combine as logical OR,
// slices are unioned, and free-text fields are concatenated.
func (what *TechnicalAsset) Merge(other TechnicalAsset) error {
	var mergeError error
	what.ID, mergeError = new(Strings).MergeSingleton(what.ID, other.ID)
	if mergeError != nil {
		return fmt.Errorf("failed to merge id: %v", mergeError)
	}

	what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description)
	if mergeError !=
nil {
		return fmt.Errorf("failed to merge description: %v", mergeError)
	}

	what.Type, mergeError = new(Strings).MergeSingleton(what.Type, other.Type)
	if mergeError != nil {
		return fmt.Errorf("failed to merge type: %v", mergeError)
	}

	what.Usage, mergeError = new(Strings).MergeSingleton(what.Usage, other.Usage)
	if mergeError != nil {
		return fmt.Errorf("failed to merge usage: %v", mergeError)
	}

	// Boolean flags merge as logical OR: once set by either side, they stay set.
	// IDIOM FIX: replaced non-idiomatic `x == false` comparisons with `!x`.
	if !what.UsedAsClientByHuman {
		what.UsedAsClientByHuman = other.UsedAsClientByHuman
	}

	if !what.OutOfScope {
		what.OutOfScope = other.OutOfScope
	}

	what.JustificationOutOfScope = new(Strings).MergeMultiline(what.JustificationOutOfScope, other.JustificationOutOfScope)

	what.Size, mergeError = new(Strings).MergeSingleton(what.Size, other.Size)
	if mergeError != nil {
		return fmt.Errorf("failed to merge size: %v", mergeError)
	}

	what.Technology, mergeError = new(Strings).MergeSingleton(what.Technology, other.Technology)
	if mergeError != nil {
		return fmt.Errorf("failed to merge technology: %v", mergeError)
	}

	what.Tags = new(Strings).MergeUniqueSlice(what.Tags, other.Tags)

	if !what.Internet {
		what.Internet = other.Internet
	}

	what.Machine, mergeError = new(Strings).MergeSingleton(what.Machine, other.Machine)
	if mergeError != nil {
		return fmt.Errorf("failed to merge machine: %v", mergeError)
	}

	what.Encryption, mergeError = new(Strings).MergeSingleton(what.Encryption, other.Encryption)
	if mergeError != nil {
		return fmt.Errorf("failed to merge encryption: %v", mergeError)
	}

	what.Owner, mergeError = new(Strings).MergeSingleton(what.Owner, other.Owner)
	if mergeError != nil {
		return fmt.Errorf("failed to merge owner: %v", mergeError)
	}

	what.Confidentiality, mergeError = new(Strings).MergeSingleton(what.Confidentiality, other.Confidentiality)
	if mergeError != nil {
		return fmt.Errorf("failed to merge confidentiality: %v", mergeError)
	}

	what.Integrity,
mergeError = new(Strings).MergeSingleton(what.Integrity, other.Integrity)
	if mergeError != nil {
		return fmt.Errorf("failed to merge integrity: %v", mergeError)
	}

	what.Availability, mergeError = new(Strings).MergeSingleton(what.Availability, other.Availability)
	if mergeError != nil {
		return fmt.Errorf("failed to merge availability: %v", mergeError)
	}

	what.JustificationCiaRating = new(Strings).MergeMultiline(what.JustificationCiaRating, other.JustificationCiaRating)

	// Boolean flags merge as logical OR.
	// IDIOM FIX: replaced non-idiomatic `x == false` comparisons with `!x`.
	if !what.MultiTenant {
		what.MultiTenant = other.MultiTenant
	}

	if !what.Redundant {
		what.Redundant = other.Redundant
	}

	if !what.CustomDevelopedParts {
		what.CustomDevelopedParts = other.CustomDevelopedParts
	}

	what.DataAssetsProcessed = new(Strings).MergeUniqueSlice(what.DataAssetsProcessed, other.DataAssetsProcessed)

	what.DataAssetsStored = new(Strings).MergeUniqueSlice(what.DataAssetsStored, other.DataAssetsStored)

	what.DataFormatsAccepted = new(Strings).MergeUniqueSlice(what.DataFormatsAccepted, other.DataFormatsAccepted)

	// 0 means "no tweak requested", so any explicit value from other wins.
	if what.DiagramTweakOrder == 0 {
		what.DiagramTweakOrder = other.DiagramTweakOrder
	}

	what.CommunicationLinks, mergeError = new(CommunicationLink).MergeMap(what.CommunicationLinks, other.CommunicationLinks)
	if mergeError != nil {
		return fmt.Errorf("failed to merge communication_links: %v", mergeError)
	}

	return nil
}

// MergeMap folds the technical-asset entries of second into first.
// Shared keys are merged via Merge; new keys are copied. first is mutated
// and returned.
func (what *TechnicalAsset) MergeMap(first map[string]TechnicalAsset, second map[string]TechnicalAsset) (map[string]TechnicalAsset, error) {
	for mapKey, mapValue := range second {
		mapItem, ok := first[mapKey]
		if ok {
			mergeError := mapItem.Merge(mapValue)
			if mergeError != nil {
				return first, fmt.Errorf("failed to merge technical asset %q: %v", mapKey, mergeError)
			}

			first[mapKey] = mapItem
		} else {
			first[mapKey] = mapValue
		}
	}

	return first, nil
}
diff --git a/pkg/input/trust-boundary.go b/pkg/input/trust-boundary.go
new file mode 100644
index
00000000..48c507e0 --- /dev/null +++ b/pkg/input/trust-boundary.go @@ -0,0 +1,56 @@ +package input + +import "fmt" + +type TrustBoundary struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` + TechnicalAssetsInside []string `yaml:"technical_assets_inside,omitempty" json:"technical_assets_inside,omitempty"` + TrustBoundariesNested []string `yaml:"trust_boundaries_nested,omitempty" json:"trust_boundaries_nested,omitempty"` +} + +func (what *TrustBoundary) Merge(other TrustBoundary) error { + var mergeError error + what.ID, mergeError = new(Strings).MergeSingleton(what.ID, other.ID) + if mergeError != nil { + return fmt.Errorf("failed to merge id: %v", mergeError) + } + + what.Description, mergeError = new(Strings).MergeSingleton(what.Description, other.Description) + if mergeError != nil { + return fmt.Errorf("failed to merge description: %v", mergeError) + } + + what.Type, mergeError = new(Strings).MergeSingleton(what.Type, other.Type) + if mergeError != nil { + return fmt.Errorf("failed to merge type: %v", mergeError) + } + + what.Tags = new(Strings).MergeUniqueSlice(what.Tags, other.Tags) + + what.TechnicalAssetsInside = new(Strings).MergeUniqueSlice(what.TechnicalAssetsInside, other.TechnicalAssetsInside) + + what.TrustBoundariesNested = new(Strings).MergeUniqueSlice(what.TrustBoundariesNested, other.TrustBoundariesNested) + + return nil +} + +func (what *TrustBoundary) MergeMap(first map[string]TrustBoundary, second map[string]TrustBoundary) (map[string]TrustBoundary, error) { + for mapKey, mapValue := range second { + mapItem, ok := first[mapKey] + if ok { + mergeError := mapItem.Merge(mapValue) + if mergeError != nil { + return first, fmt.Errorf("failed to merge trust boundary %q: %v", mapKey, mergeError) + } + + first[mapKey] = mapItem + } else { + 
first[mapKey] = mapValue
		}
	}

	return first, nil
}
diff --git a/pkg/macros/add-build-pipeline-macro.go b/pkg/macros/add-build-pipeline-macro.go
new file mode 100644
index 00000000..d032d344
--- /dev/null
+++ b/pkg/macros/add-build-pipeline-macro.go
@@ -0,0 +1,1023 @@
package macros

import (
	"fmt"
	"sort"
	"strings"

	"github.com/threagile/threagile/pkg/input"
	"github.com/threagile/threagile/pkg/security/types"
)

// addBuildPipeline is an interactive model macro that adds build-pipeline
// assets (dev client, pipeline, registries, etc.) to a threat model.
// It is driven question-by-question via GetNextQuestion/ApplyAnswer.
type addBuildPipeline struct {
	// macroState maps question ID -> the answer(s) given for it.
	macroState map[string][]string
	// questionsAnswered records question IDs in answer order (for GoBack).
	questionsAnswered []string
	// Derived flags cached from specific answers; they steer which follow-up
	// questions are asked (see the counter arithmetic in GetNextQuestion).
	codeInspectionUsed     bool
	containerTechUsed      bool
	withinTrustBoundary    bool
	createNewTrustBoundary bool
}

// NewBuildPipeline returns a fresh macro instance with empty answer state.
func NewBuildPipeline() *addBuildPipeline {
	return &addBuildPipeline{
		macroState:        make(map[string][]string),
		questionsAnswered: make([]string, 0),
	}
}

// pushOrPull lists the two deployment-strategy answer options offered by the
// "push-or-pull" question.
var pushOrPull = []string{
	"Push-based Deployment (build pipeline deploys towards target asset)",
	"Pull-based Deployment (deployment target asset fetches deployment from registry)",
}

// GetMacroDetails describes this macro for listing/selection UIs.
func (m *addBuildPipeline) GetMacroDetails() MacroDetails {
	return MacroDetails{
		ID:    "add-build-pipeline",
		Title: "Add Build Pipeline",
		Description: "This model macro adds a build pipeline (development client, build pipeline, artifact registry, container image registry, " +
			"source code repository, etc.) to the model.",
	}
}

// TODO add question for type of machine (either physical, virtual, container, etc.)
// GetNextQuestion returns the next unanswered question of the wizard.
// The counter starts at the number of answers given and is bumped past the
// case numbers of questions that became irrelevant (e.g. container-registry
// questions when no container technology is used).
func (m *addBuildPipeline) GetNextQuestion(model *types.ParsedModel) (nextQuestion MacroQuestion, err error) {
	counter := len(m.questionsAnswered)
	// Skip the code-inspection-platform question when inspection is not used.
	if counter > 3 && !m.codeInspectionUsed {
		counter++
	}
	// Skip the two container questions when container tech is not used.
	if counter > 5 && !m.containerTechUsed {
		counter += 2
	}
	// Skip trust-boundary selection when not within a trust boundary.
	if counter > 12 && !m.withinTrustBoundary {
		counter++
	}
	// Skip the new-boundary-type question when an existing boundary was chosen.
	if counter > 13 && !m.createNewTrustBoundary {
		counter++
	}
	switch counter {
	case 0:
		return MacroQuestion{
			ID:              "source-repository",
			Title:           "What product is used as the sourcecode repository?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "Git",
		}, nil
	case 1:
		return MacroQuestion{
			ID:              "build-pipeline",
			Title:           "What product is used as the build pipeline?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "Jenkins",
		}, nil
	case 2:
		return MacroQuestion{
			ID:              "artifact-registry",
			Title:           "What product is used as the artifact registry?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "Nexus",
		}, nil
	case 3:
		return MacroQuestion{
			ID:              "code-inspection-used",
			Title:           "Are code inspection platforms (like SonarQube) used?",
			Description:     "This affects whether code inspection platform are added.",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "Yes",
		}, nil
	case 4:
		return MacroQuestion{
			ID:              "code-inspection-platform",
			Title:           "What product is used as the code inspection platform?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "SonarQube",
		}, nil
	case 5:
		return MacroQuestion{
			ID:              "container-technology-used",
			Title:           "Is container technology (like Docker) used?",
			Description:     "This affects whether container registries are added.",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "Yes",
		}, nil
	case 6:
		return MacroQuestion{
			ID:              "container-registry",
			Title:           "What product is used as the container registry?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "Docker",
		}, nil
	case 7:
		return MacroQuestion{
			ID:              "container-platform",
			Title:           "What product is used as the container platform (for orchestration and runtime)?",
			Description:     "This name affects the technical asset's title and ID plus also the tags used.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "Kubernetes",
		}, nil
	case 8:
		return MacroQuestion{
			ID:              "internet",
			Title:           "Are build pipeline components exposed on the internet?",
			Description:     "",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "No",
		}, nil
	case 9:
		return MacroQuestion{
			ID:              "multi-tenant",
			Title:           "Are build pipeline components used by multiple tenants?",
			Description:     "",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "No",
		}, nil
	case 10:
		return MacroQuestion{
			ID:              "encryption",
			Title:           "Are build pipeline components encrypted?",
			Description:     "",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "No",
		}, nil
	case 11:
		possibleAnswers := make([]string, 0)
		for id := range model.TechnicalAssets {
			possibleAnswers = append(possibleAnswers, id)
		}
		sort.Strings(possibleAnswers)
		// When the model has no technical assets yet, this question is skipped
		// by falling through to NoMoreQuestions below.
		if len(possibleAnswers) > 0 {
			return MacroQuestion{
				ID:              "deploy-targets",
				Title:           "Select all technical assets where the build pipeline deploys to:",
				Description:     "This affects the communication links being generated.",
				PossibleAnswers: possibleAnswers,
				MultiSelect:     true,
				DefaultAnswer:   "",
			}, nil
		}
	case 12:
		return MacroQuestion{
			ID:              "within-trust-boundary",
			Title:           "Are the server-side components of the build pipeline components within a network trust boundary?",
			Description:     "",
			PossibleAnswers: []string{"Yes", "No"},
			MultiSelect:     false,
			DefaultAnswer:   "Yes",
		}, nil
	case 13:
		// Offer existing network boundaries plus the "create new" sentinel.
		possibleAnswers := []string{createNewTrustBoundaryLabel}
		for id, trustBoundary := range model.TrustBoundaries {
			if trustBoundary.Type.IsNetworkBoundary() {
				possibleAnswers = append(possibleAnswers, id)
			}
		}
		sort.Strings(possibleAnswers)
		return MacroQuestion{
			ID:              "selected-trust-boundary",
			Title:           "Choose from the list of existing network trust boundaries or create a new one?",
			Description:     "",
			PossibleAnswers: possibleAnswers,
			MultiSelect:     false,
			DefaultAnswer:   "",
		}, nil
	case 14:
		return MacroQuestion{
			ID:          "new-trust-boundary-type",
			Title:       "Of which type shall the new trust boundary be?",
			Description: "",
			PossibleAnswers: []string{types.NetworkOnPrem.String(),
				types.NetworkDedicatedHoster.String(),
				types.NetworkVirtualLAN.String(),
				types.NetworkCloudProvider.String(),
				types.NetworkCloudSecurityGroup.String(),
				types.NetworkPolicyNamespaceIsolation.String()},
			MultiSelect:   false,
			DefaultAnswer: types.NetworkOnPrem.String(),
		}, nil
	case 15:
		return MacroQuestion{
			ID:              "push-or-pull",
			Title:           "What type of deployment strategy is used?",
			Description:     "Push-based deployments are more classic ones and pull-based are more GitOps-like ones.",
			PossibleAnswers: pushOrPull,
			MultiSelect:     false,
			DefaultAnswer:   "",
		}, nil
	case 16:
		return MacroQuestion{
			ID:              "owner",
			Title:           "Who is the owner of the build pipeline and runtime assets?",
			Description:     "This name affects the technical asset's and data asset's owner.",
			PossibleAnswers: nil,
			MultiSelect:     false,
			DefaultAnswer:   "",
		}, nil
	}
	return NoMoreQuestions(), nil
}

func (m *addBuildPipeline)
ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) {
	// Record the answer, then cache the derived flags that steer which
	// follow-up questions GetNextQuestion will offer.
	m.macroState[questionID] = answer
	m.questionsAnswered = append(m.questionsAnswered, questionID)
	if questionID == "code-inspection-used" {
		m.codeInspectionUsed = strings.ToLower(m.macroState["code-inspection-used"][0]) == "yes"
	} else if questionID == "container-technology-used" {
		m.containerTechUsed = strings.ToLower(m.macroState["container-technology-used"][0]) == "yes"
	} else if questionID == "within-trust-boundary" {
		m.withinTrustBoundary = strings.ToLower(m.macroState["within-trust-boundary"][0]) == "yes"
	} else if questionID == "selected-trust-boundary" {
		// IDIOM FIX: strings.EqualFold replaces the allocating and
		// vet-flagged pattern `strings.ToLower(a) == strings.ToLower(b)`.
		m.createNewTrustBoundary = strings.EqualFold(m.macroState["selected-trust-boundary"][0], createNewTrustBoundaryLabel)
	}
	return "Answer processed", true, nil
}

// GoBack undoes the most recent answer so the question can be asked again.
func (m *addBuildPipeline) GoBack() (message string, validResult bool, err error) {
	if len(m.questionsAnswered) == 0 {
		return "Cannot go back further", false, nil
	}
	lastQuestionID := m.questionsAnswered[len(m.questionsAnswered)-1]
	m.questionsAnswered = m.questionsAnswered[:len(m.questionsAnswered)-1]
	delete(m.macroState, lastQuestionID)
	return "Undo successful", true, nil
}

// GetFinalChangeImpact dry-runs the macro and returns the change log that a
// real execution would produce, without mutating the model input.
func (m *addBuildPipeline) GetFinalChangeImpact(modelInput *input.Model, model *types.ParsedModel) (changes []string, message string, validResult bool, err error) {
	changeLogCollector := make([]string, 0)
	message, validResult, err = m.applyChange(modelInput, model, &changeLogCollector, true)
	return changeLogCollector, message, validResult, err
}

// Execute applies the macro's changes to the model input for real.
func (m *addBuildPipeline) Execute(modelInput *input.Model, model *types.ParsedModel) (message string, validResult bool, err error) {
	changeLogCollector := make([]string, 0)
	message, validResult, err = m.applyChange(modelInput, model, &changeLogCollector, false)
	return message, validResult, err
}

// applyChange performs (or, with dryRun, only logs) all model additions.
func (m *addBuildPipeline) applyChange(modelInput *input.Model,
parsedModel *types.ParsedModel, changeLogCollector *[]string, dryRun bool) (message string, validResult bool, err error) { + var serverSideTechAssets = make([]string, 0) + // ################################################ + modelInput.AddTagToModelInput(m.macroState["source-repository"][0], dryRun, changeLogCollector) + modelInput.AddTagToModelInput(m.macroState["build-pipeline"][0], dryRun, changeLogCollector) + modelInput.AddTagToModelInput(m.macroState["artifact-registry"][0], dryRun, changeLogCollector) + if m.containerTechUsed { + modelInput.AddTagToModelInput(m.macroState["container-registry"][0], dryRun, changeLogCollector) + modelInput.AddTagToModelInput(m.macroState["container-platform"][0], dryRun, changeLogCollector) + } + if m.codeInspectionUsed { + modelInput.AddTagToModelInput(m.macroState["code-inspection-platform"][0], dryRun, changeLogCollector) + } + + sourceRepoID := types.MakeID(m.macroState["source-repository"][0]) + "-sourcecode-repository" + buildPipelineID := types.MakeID(m.macroState["build-pipeline"][0]) + "-build-pipeline" + artifactRegistryID := types.MakeID(m.macroState["artifact-registry"][0]) + "-artifact-registry" + containerRepoID, containerPlatformID, containerSharedRuntimeID := "", "", "" + if m.containerTechUsed { + containerRepoID = types.MakeID(m.macroState["container-registry"][0]) + "-container-registry" + containerPlatformID = types.MakeID(m.macroState["container-platform"][0]) + "-container-platform" + containerSharedRuntimeID = types.MakeID(m.macroState["container-platform"][0]) + "-container-runtime" + } + codeInspectionPlatformID := "" + if m.codeInspectionUsed { + codeInspectionPlatformID = types.MakeID(m.macroState["code-inspection-platform"][0]) + "-code-inspection-platform" + } + owner := m.macroState["owner"][0] + + if _, exists := parsedModel.DataAssets["Sourcecode"]; !exists { + //fmt.Println("Adding data asset:", "sourcecode") // ################################################ + dataAsset := input.DataAsset{ + 
ID: "sourcecode", + Description: "Sourcecode to build the application components from", + Usage: types.DevOps.String(), + Tags: []string{}, + Origin: "", + Owner: owner, + Quantity: types.Few.String(), + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Sourcecode is at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + } + *changeLogCollector = append(*changeLogCollector, "adding data asset: sourcecode") + if !dryRun { + modelInput.DataAssets["Sourcecode"] = dataAsset + } + } + + if _, exists := parsedModel.DataAssets["Deployment"]; !exists { + //fmt.Println("Adding data asset:", "deployment") // ################################################ + dataAsset := input.DataAsset{ + ID: "deployment", + Description: "Deployment unit being installed/shipped", + Usage: types.DevOps.String(), + Tags: []string{}, + Origin: "", + Owner: owner, + Quantity: types.VeryFew.String(), + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Deployment units are at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + } + *changeLogCollector = append(*changeLogCollector, "adding data asset: deployment") + if !dryRun { + modelInput.DataAssets["Deployment"] = dataAsset + } + } + + id := "development-client" + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + + commLinks := make(map[string]input.CommunicationLink) + commLinks["Sourcecode 
Repository Traffic"] = input.CommunicationLink{ + Target: sourceRepoID, + Description: "Sourcecode Repository Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"sourcecode"}, + DataAssetsReceived: []string{"sourcecode"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + commLinks["Build Pipeline Traffic"] = input.CommunicationLink{ + Target: buildPipelineID, + Description: "Build Pipeline Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + commLinks["Artifact Registry Traffic"] = input.CommunicationLink{ + Target: artifactRegistryID, + Description: "Artifact Registry Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if m.containerTechUsed { + commLinks["Container Registry Traffic"] = input.CommunicationLink{ + Target: containerRepoID, + Description: "Container Registry Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, 
+ DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + commLinks["Container Platform Traffic"] = input.CommunicationLink{ + Target: containerPlatformID, + Description: "Container Platform Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + } + if m.codeInspectionUsed { + commLinks["Code Inspection Platform Traffic"] = input.CommunicationLink{ + Target: codeInspectionPlatformID, + Description: "Code Inspection Platform Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.EndUserIdentityPropagation.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"sourcecode"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + } + + techAsset := input.TechnicalAsset{ + ID: id, + Description: "Development Client", + Type: types.ExternalEntity.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: true, + OutOfScope: true, + JustificationOutOfScope: "Development client is not directly in-scope of the application.", + Size: types.System.String(), + Technology: types.DevOpsClient.String(), + Tags: []string{}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Physical.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Sourcecode processing components are at least rated as 'critical' in terms of 
integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: false, + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"sourcecode", "deployment"}, + DataAssetsStored: []string{"sourcecode", "deployment"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: commLinks, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets["Development Client"] = techAsset + } + } + + id = sourceRepoID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["source-repository"][0] + " Sourcecode Repository", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.SourcecodeRepository.String(), + Tags: []string{input.NormalizeTag(m.macroState["source-repository"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Sourcecode processing components are at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + 
CustomDevelopedParts: false, + DataAssetsProcessed: []string{"sourcecode"}, + DataAssetsStored: []string{"sourcecode"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["source-repository"][0]+" Sourcecode Repository"] = techAsset + } + } + + if m.containerTechUsed { + id = containerRepoID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["container-registry"][0] + " Container Registry", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.ArtifactRegistry.String(), + Tags: []string{input.NormalizeTag(m.macroState["container-registry"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Container registry components are at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"deployment"}, + DataAssetsStored: []string{"deployment"}, + 
DataFormatsAccepted: []string{"file"}, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["container-registry"][0]+" Container Registry"] = techAsset + } + } + + id = containerPlatformID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["container-platform"][0] + " Container Platform", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.System.String(), + Technology: types.ContainerPlatform.String(), + Tags: []string{input.NormalizeTag(m.macroState["container-platform"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.MissionCritical.String(), + Availability: types.MissionCritical.String(), + JustificationCiaRating: "Container platform components are rated as 'mission-critical' in terms of integrity and availability, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"deployment"}, + DataAssetsStored: []string{"deployment"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding 
technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["container-platform"][0]+" Container Platform"] = techAsset + } + } + } + + id = buildPipelineID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + + commLinks := make(map[string]input.CommunicationLink) + commLinks["Sourcecode Repository Traffic"] = input.CommunicationLink{ + Target: sourceRepoID, + Description: "Sourcecode Repository Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"sourcecode"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + commLinks["Artifact Registry Traffic"] = input.CommunicationLink{ + Target: artifactRegistryID, + Description: "Artifact Registry Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if m.containerTechUsed { + commLinks["Container Registry Traffic"] = input.CommunicationLink{ + Target: containerRepoID, + Description: "Container Registry Traffic", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: 
false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if m.macroState["push-or-pull"][0] == pushOrPull[0] { // Push + commLinks["Container Platform Push"] = input.CommunicationLink{ + Target: containerPlatformID, + Description: "Container Platform Push", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + } else { // Pull + commLinkPull := input.CommunicationLink{ + Target: containerRepoID, + Description: "Container Platform Pull", + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if !dryRun { + titleOfTargetAsset := m.macroState["container-platform"][0] + " Container Platform" + containerPlatform := modelInput.TechnicalAssets[titleOfTargetAsset] + if containerPlatform.CommunicationLinks == nil { + containerPlatform.CommunicationLinks = make(map[string]input.CommunicationLink) + } + containerPlatform.CommunicationLinks["Container Platform Pull"] = commLinkPull + modelInput.TechnicalAssets[titleOfTargetAsset] = containerPlatform + } + } + } + if m.codeInspectionUsed { + commLinks["Code Inspection Platform Traffic"] = input.CommunicationLink{ + Target: codeInspectionPlatformID, + Description: "Code Inspection Platform Traffic", + Protocol: 
types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"sourcecode"}, + DataAssetsReceived: []string{}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + } + // The individual deployments + for _, deployTargetID := range m.macroState["deploy-targets"] { // add a connection to each deployment target + //fmt.Println("Adding deployment flow to:", deployTargetID) + if m.containerTechUsed { + if !dryRun { + containerPlatform := modelInput.TechnicalAssets[m.macroState["container-platform"][0]+" Container Platform"] + if containerPlatform.CommunicationLinks == nil { + containerPlatform.CommunicationLinks = make(map[string]input.CommunicationLink) + } + containerPlatform.CommunicationLinks["Container Spawning ("+deployTargetID+")"] = input.CommunicationLink{ + Target: deployTargetID, + Description: "Container Spawning " + deployTargetID, + Protocol: types.ContainerSpawning.String(), + Authentication: types.NoneAuthentication.String(), + Authorization: types.NoneAuthorization.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: nil, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + modelInput.TechnicalAssets[m.macroState["container-platform"][0]+" Container Platform"] = containerPlatform + } + } else { // No Containers used + if m.macroState["push-or-pull"][0] == pushOrPull[0] { // Push + commLinks["Deployment Push ("+deployTargetID+")"] = input.CommunicationLink{ + Target: deployTargetID, + Description: "Deployment Push to " + deployTargetID, + Protocol: types.SSH.String(), + Authentication: types.ClientCertificate.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + 
Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"deployment"}, + DataAssetsReceived: nil, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + } else { // Pull + pullFromWhere := artifactRegistryID + commLinkPull := input.CommunicationLink{ + Target: pullFromWhere, + Description: "Deployment Pull from " + deployTargetID, + Protocol: types.HTTPS.String(), + Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"deployment"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if !dryRun { + // take care to lookup by title (as keyed in input YAML by title and only in parsed model representation by ID) + titleOfTargetAsset := parsedModel.TechnicalAssets[deployTargetID].Title + x := modelInput.TechnicalAssets[titleOfTargetAsset] + if x.CommunicationLinks == nil { + x.CommunicationLinks = make(map[string]input.CommunicationLink) + } + x.CommunicationLinks["Deployment Pull ("+deployTargetID+")"] = commLinkPull + modelInput.TechnicalAssets[titleOfTargetAsset] = x + } + } + } + + // don't forget to also add the "deployment" data asset as stored on the target + targetAssetTitle := parsedModel.TechnicalAssets[deployTargetID].Title + assetsStored := make([]string, 0) + if modelInput.TechnicalAssets[targetAssetTitle].DataAssetsStored != nil { + for _, val := range modelInput.TechnicalAssets[targetAssetTitle].DataAssetsStored { + assetsStored = append(assetsStored, fmt.Sprintf("%v", val)) + } + } + mergedArrays := make([]string, 0) + for _, val := range assetsStored { + mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) + } + mergedArrays = append(mergedArrays, "deployment") + if !dryRun { + x := modelInput.TechnicalAssets[targetAssetTitle] + x.DataAssetsStored = mergedArrays + modelInput.TechnicalAssets[targetAssetTitle] = x 
+ } + } + + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["build-pipeline"][0] + " Build Pipeline", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.BuildPipeline.String(), + Tags: []string{input.NormalizeTag(m.macroState["build-pipeline"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Build pipeline components are at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"sourcecode", "deployment"}, + DataAssetsStored: []string{"sourcecode", "deployment"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: commLinks, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["build-pipeline"][0]+" Build Pipeline"] = techAsset + } + } + + id = artifactRegistryID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["artifact-registry"][0] + " Artifact Registry", + Type: 
types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.ArtifactRegistry.String(), + Tags: []string{input.NormalizeTag(m.macroState["artifact-registry"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Important.String(), + JustificationCiaRating: "Artifact registry components are at least rated as 'critical' in terms of integrity, because any " + + "malicious modification of it might lead to a backdoored production system.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"sourcecode", "deployment"}, + DataAssetsStored: []string{"sourcecode", "deployment"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["artifact-registry"][0]+" Artifact Registry"] = techAsset + } + } + + if m.codeInspectionUsed { + id = codeInspectionPlatformID + if _, exists := parsedModel.TechnicalAssets[id]; !exists { + //fmt.Println("Adding technical asset:", id) // ################################################ + serverSideTechAssets = append(serverSideTechAssets, id) + encryption := types.NoneEncryption.String() + if strings.ToLower(m.macroState["encryption"][0]) == "yes" { + encryption = types.Transparent.String() + } + techAsset := input.TechnicalAsset{ + ID: id, + Description: m.macroState["code-inspection-platform"][0] + " Code Inspection Platform", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + 
OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.CodeInspectionPlatform.String(), + Tags: []string{input.NormalizeTag(m.macroState["code-inspection-platform"][0])}, + Internet: strings.ToLower(m.macroState["internet"][0]) == "yes", + Machine: types.Virtual.String(), + Encryption: encryption, + Owner: owner, + Confidentiality: types.Confidential.String(), + Integrity: types.Important.String(), + Availability: types.Operational.String(), + JustificationCiaRating: "Sourcecode inspection platforms are rated at least 'important' in terms of integrity, because any " + + "malicious modification of it might lead to vulnerabilities found by the scanner engine not being shown.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"sourcecode"}, + DataAssetsStored: []string{"sourcecode"}, + DataFormatsAccepted: []string{"file"}, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+id) + if !dryRun { + modelInput.TechnicalAssets[m.macroState["code-inspection-platform"][0]+" Code Inspection Platform"] = techAsset + } + } + } + + if m.withinTrustBoundary { + if m.createNewTrustBoundary { + trustBoundaryType := m.macroState["new-trust-boundary-type"][0] + //fmt.Println("Adding new trust boundary of type:", trustBoundaryType) + title := "DevOps Network" + trustBoundary := input.TrustBoundary{ + ID: "devops-network", + Description: "DevOps Network", + Type: trustBoundaryType, + Tags: []string{}, + TechnicalAssetsInside: serverSideTechAssets, + TrustBoundariesNested: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding trust boundary: devops-network") + if !dryRun { + modelInput.TrustBoundaries[title] = trustBoundary + } + } else { + existingTrustBoundaryToAddTo := m.macroState["selected-trust-boundary"][0] + 
//fmt.Println("Adding to existing trust boundary:", existingTrustBoundaryToAddTo) + title := parsedModel.TrustBoundaries[existingTrustBoundaryToAddTo].Title + assetsInside := make([]string, 0) + if modelInput.TrustBoundaries[title].TechnicalAssetsInside != nil { + values := modelInput.TrustBoundaries[title].TechnicalAssetsInside + for _, val := range values { + assetsInside = append(assetsInside, fmt.Sprintf("%v", val)) + } + } + mergedArrays := make([]string, 0) + for _, val := range assetsInside { + mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) + } + mergedArrays = append(mergedArrays, serverSideTechAssets...) + *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) + if !dryRun { + if modelInput.TrustBoundaries == nil { + modelInput.TrustBoundaries = make(map[string]input.TrustBoundary) + } + tb := modelInput.TrustBoundaries[title] + tb.TechnicalAssetsInside = mergedArrays + modelInput.TrustBoundaries[title] = tb + } + } + } + + if m.containerTechUsed { + // create shared runtime + assetsRunning := make([]string, 0) + for _, deployTargetID := range m.macroState["deploy-targets"] { + assetsRunning = append(assetsRunning, deployTargetID) + } + title := m.macroState["container-platform"][0] + " Runtime" + sharedRuntime := input.SharedRuntime{ + ID: containerSharedRuntimeID, + Description: title, + Tags: []string{input.NormalizeTag(m.macroState["container-platform"][0])}, + TechnicalAssetsRunning: assetsRunning, + } + *changeLogCollector = append(*changeLogCollector, "adding shared runtime: "+containerSharedRuntimeID) + if !dryRun { + if modelInput.SharedRuntimes == nil { + modelInput.SharedRuntimes = make(map[string]input.SharedRuntime) + } + modelInput.SharedRuntimes[title] = sharedRuntime + } + } + + return "Changeset valid", true, nil +} diff --git a/pkg/macros/add-vault-macro.go b/pkg/macros/add-vault-macro.go new file mode 100644 index 00000000..f879f620 --- /dev/null +++ 
b/pkg/macros/add-vault-macro.go @@ -0,0 +1,460 @@ +package macros + +import ( + "fmt" + "sort" + "strings" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" +) + +type addVaultMacro struct { + macroState map[string][]string + questionsAnswered []string + withinTrustBoundary bool + createNewTrustBoundary bool +} + +const createNewTrustBoundaryLabel = "CREATE NEW TRUST BOUNDARY" + +var storageTypes = []string{ + "Cloud Provider (storage buckets or similar)", + "Container Platform (orchestration platform managed storage)", + "Database (SQL-DB, NoSQL-DB, object store or similar)", // TODO let user choose to reuse existing technical asset when shared storage (which would be bad) + "Filesystem (local or remote)", + "In-Memory (no persistent storage of secrets)", + "Service Registry", // TODO let user choose which technical asset the registry is (for comm link) +} +var authenticationTypes = []string{ + "Certificate", + "Cloud Provider (relying on cloud provider instance authentication)", + "Container Platform (orchestration platform managed authentication)", + "Credentials (username/password, API-key, secret token, etc.)", +} + +func NewAddVault() *addVaultMacro { + return &addVaultMacro{ + macroState: make(map[string][]string), + questionsAnswered: make([]string, 0), + } +} + +func (m *addVaultMacro) GetMacroDetails() MacroDetails { + return MacroDetails{ + ID: "add-vault", + Title: "Add Vault", + Description: "This model macro adds a vault (secret storage) to the model.", + } +} + +func (m *addVaultMacro) GetNextQuestion(parsedModel *types.ParsedModel) (nextQuestion MacroQuestion, err error) { + counter := len(m.questionsAnswered) + if counter > 5 && !m.withinTrustBoundary { + counter++ + } + if counter > 6 && !m.createNewTrustBoundary { + counter++ + } + switch counter { + case 0: + return MacroQuestion{ + ID: "vault-name", + Title: "What product is used as the vault?", + Description: "This name affects the technical 
asset's title and ID plus also the tags used.", + PossibleAnswers: nil, + MultiSelect: false, + DefaultAnswer: "", + }, nil + case 1: + return MacroQuestion{ + ID: "storage-type", + Title: "What type of storage is used for the vault?", + Description: "This selection affects the type of technical asset for the persistence.", + PossibleAnswers: storageTypes, + MultiSelect: false, + DefaultAnswer: "", + }, nil + case 2: + return MacroQuestion{ + ID: "authentication-type", + Title: "What type of authentication is used for accessing the vault?", + Description: "This selection affects the type of communication links.", + PossibleAnswers: authenticationTypes, + MultiSelect: false, + DefaultAnswer: "", + }, nil + case 3: + return MacroQuestion{ + ID: "multi-tenant", + Title: "Is the vault used by multiple tenants?", + Description: "", + PossibleAnswers: []string{"Yes", "No"}, + MultiSelect: false, + DefaultAnswer: "No", + }, nil + case 4: + possibleAnswers := make([]string, 0) + for id := range parsedModel.TechnicalAssets { + possibleAnswers = append(possibleAnswers, id) + } + sort.Strings(possibleAnswers) + if len(possibleAnswers) > 0 { + return MacroQuestion{ + ID: "clients", + Title: "Select all technical assets that make use of the vault and access it:", + Description: "This affects the communication links being generated.", + PossibleAnswers: possibleAnswers, + MultiSelect: true, + DefaultAnswer: "", + }, nil + } + case 5: + return MacroQuestion{ + ID: "within-trust-boundary", + Title: "Is the vault placed within a network trust boundary?", + Description: "", + PossibleAnswers: []string{"Yes", "No"}, + MultiSelect: false, + DefaultAnswer: "Yes", + }, nil + case 6: + possibleAnswers := []string{createNewTrustBoundaryLabel} + for id, trustBoundary := range parsedModel.TrustBoundaries { + if trustBoundary.Type.IsNetworkBoundary() { + possibleAnswers = append(possibleAnswers, id) + } + } + sort.Strings(possibleAnswers) + return MacroQuestion{ + ID: 
"selected-trust-boundary", + Title: "Choose from the list of existing network trust boundaries or create a new one?", + Description: "", + PossibleAnswers: possibleAnswers, + MultiSelect: false, + DefaultAnswer: "", + }, nil + case 7: + return MacroQuestion{ + ID: "new-trust-boundary-type", + Title: "Of which type shall the new trust boundary be?", + Description: "", + PossibleAnswers: []string{types.NetworkOnPrem.String(), + types.NetworkDedicatedHoster.String(), + types.NetworkVirtualLAN.String(), + types.NetworkCloudProvider.String(), + types.NetworkCloudSecurityGroup.String(), + types.NetworkPolicyNamespaceIsolation.String()}, + MultiSelect: false, + DefaultAnswer: types.NetworkOnPrem.String(), + }, nil + } + return NoMoreQuestions(), nil +} + +func (m *addVaultMacro) ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) { + m.macroState[questionID] = answer + m.questionsAnswered = append(m.questionsAnswered, questionID) + if questionID == "within-trust-boundary" { + m.withinTrustBoundary = strings.ToLower(m.macroState["within-trust-boundary"][0]) == "yes" + } else if questionID == "selected-trust-boundary" { + m.createNewTrustBoundary = strings.ToLower(m.macroState["selected-trust-boundary"][0]) == strings.ToLower(createNewTrustBoundaryLabel) + } + return "Answer processed", true, nil +} + +func (m *addVaultMacro) GoBack() (message string, validResult bool, err error) { + if len(m.questionsAnswered) == 0 { + return "Cannot go back further", false, nil + } + lastQuestionID := m.questionsAnswered[len(m.questionsAnswered)-1] + m.questionsAnswered = m.questionsAnswered[:len(m.questionsAnswered)-1] + delete(m.macroState, lastQuestionID) + return "Undo successful", true, nil +} + +func (m *addVaultMacro) GetFinalChangeImpact(modelInput *input.Model, parsedModel *types.ParsedModel) (changes []string, message string, validResult bool, err error) { + changeLogCollector := make([]string, 0) + message, validResult, err = 
m.applyChange(modelInput, parsedModel, &changeLogCollector, true) + return changeLogCollector, message, validResult, err +} + +func (m *addVaultMacro) Execute(modelInput *input.Model, parsedModel *types.ParsedModel) (message string, validResult bool, err error) { + changeLogCollector := make([]string, 0) + message, validResult, err = m.applyChange(modelInput, parsedModel, &changeLogCollector, false) + return message, validResult, err +} + +func (m *addVaultMacro) applyChange(modelInput *input.Model, parsedModel *types.ParsedModel, changeLogCollector *[]string, dryRun bool) (message string, validResult bool, err error) { + modelInput.AddTagToModelInput(m.macroState["vault-name"][0], dryRun, changeLogCollector) + + var serverSideTechAssets = make([]string, 0) + + if _, exists := parsedModel.DataAssets["Configuration Secrets"]; !exists { + dataAsset := input.DataAsset{ + ID: "configuration-secrets", + Description: "Configuration secrets (like credentials, keys, certificates, etc.) secured and managed by a vault", + Usage: types.DevOps.String(), + Tags: []string{}, + Origin: "", + Owner: "", + Quantity: types.VeryFew.String(), + Confidentiality: types.StrictlyConfidential.String(), + Integrity: types.Critical.String(), + Availability: types.Critical.String(), + JustificationCiaRating: "Configuration secrets are rated as being 'strictly-confidential'.", + } + *changeLogCollector = append(*changeLogCollector, "adding data asset: configuration-secrets") + if !dryRun { + modelInput.DataAssets["Configuration Secrets"] = dataAsset + } + } + + databaseUsed := m.macroState["storage-type"][0] == storageTypes[2] + filesystemUsed := m.macroState["storage-type"][0] == storageTypes[3] + inMemoryUsed := m.macroState["storage-type"][0] == storageTypes[4] + + storageID := "vault-storage" + + if databaseUsed || filesystemUsed { + tech := types.FileServer.String() // TODO ask for local or remote and only local use execution-environment (and add separate tech type LocalFilesystem?) 
+ if databaseUsed { + tech = types.Database.String() + } + if _, exists := parsedModel.TechnicalAssets[storageID]; !exists { + serverSideTechAssets = append(serverSideTechAssets, storageID) + techAsset := input.TechnicalAsset{ + ID: storageID, + Description: "Vault Storage", + Type: types.Datastore.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Component.String(), + Technology: tech, + Tags: []string{}, // TODO: let user enter or too detailed for a wizard? + Internet: false, + Machine: types.Virtual.String(), // TODO: let user enter or too detailed for a wizard? + Encryption: types.DataWithSymmetricSharedKey.String(), // can be assumed for a vault product as at least having some good encryption + Owner: "", + Confidentiality: types.Confidential.String(), + Integrity: types.Critical.String(), + Availability: types.Critical.String(), + JustificationCiaRating: "Vault components are only rated as 'confidential' as vaults usually apply a trust barrier to encrypt all data-at-rest with a vault key.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: nil, + DataAssetsStored: []string{"configuration-secrets"}, + DataFormatsAccepted: nil, + CommunicationLinks: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset: "+storageID) + if !dryRun { + modelInput.TechnicalAssets["Vault Storage"] = techAsset + } + } + } + + vaultID := types.MakeID(m.macroState["vault-name"][0]) + "-vault" + + if _, exists := parsedModel.TechnicalAssets[vaultID]; !exists { + serverSideTechAssets = append(serverSideTechAssets, vaultID) + commLinks := make(map[string]input.CommunicationLink) + + if databaseUsed || filesystemUsed { + accessLink := input.CommunicationLink{ + Target: storageID, + Description: "Vault Storage Access", + Protocol: types.LocalFileAccess.String(), + 
Authentication: types.Credentials.String(), + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: false, + Usage: types.DevOps.String(), + DataAssetsSent: []string{"configuration-secrets"}, + DataAssetsReceived: []string{"configuration-secrets"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + if databaseUsed { + accessLink.Protocol = types.SqlAccessProtocol.String() // TODO ask if encrypted and ask if NoSQL? or to detailed for a wizard? + } + commLinks["Vault Storage Access"] = accessLink + } + + authentication := types.NoneAuthentication.String() + if m.macroState["authentication-type"][0] == authenticationTypes[0] { + authentication = types.ClientCertificate.String() + } else if m.macroState["authentication-type"][0] == authenticationTypes[1] { + authentication = types.Externalized.String() + } else if m.macroState["authentication-type"][0] == authenticationTypes[2] { + authentication = types.Externalized.String() + } else if m.macroState["authentication-type"][0] == authenticationTypes[3] { + authentication = types.Credentials.String() + } + for _, clientID := range m.macroState["clients"] { // add a connection from each client + clientAccessCommLink := input.CommunicationLink{ + Target: vaultID, + Description: "Vault Access Traffic (by " + clientID + ")", + Protocol: types.HTTPS.String(), + Authentication: authentication, + Authorization: types.TechnicalUser.String(), + Tags: []string{}, + VPN: false, + IpFiltered: false, + Readonly: true, + Usage: types.DevOps.String(), + DataAssetsSent: nil, + DataAssetsReceived: []string{"configuration-secrets"}, + DiagramTweakWeight: 0, + DiagramTweakConstraint: false, + } + clientAssetTitle := parsedModel.TechnicalAssets[clientID].Title + if !dryRun { + client := modelInput.TechnicalAssets[clientAssetTitle] + client.CommunicationLinks["Vault Access ("+clientID+")"] = clientAccessCommLink + modelInput.TechnicalAssets[clientAssetTitle] = client + } 
+ // don't forget to also add the "configuration-secrets" data asset as processed on the client + assetsProcessed := make([]string, 0) + if modelInput.TechnicalAssets[clientAssetTitle].DataAssetsProcessed != nil { + for _, val := range modelInput.TechnicalAssets[clientAssetTitle].DataAssetsProcessed { + assetsProcessed = append(assetsProcessed, fmt.Sprintf("%v", val)) + } + } + mergedArrays := make([]string, 0) + for _, val := range assetsProcessed { + mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) + } + mergedArrays = append(mergedArrays, "configuration-secrets") + if !dryRun { + x := modelInput.TechnicalAssets[clientAssetTitle] + x.DataAssetsProcessed = mergedArrays + modelInput.TechnicalAssets[clientAssetTitle] = x + } + } + + techAsset := input.TechnicalAsset{ + ID: vaultID, + Description: m.macroState["vault-name"][0] + " Vault", + Type: types.Process.String(), + Usage: types.DevOps.String(), + UsedAsClientByHuman: false, + OutOfScope: false, + JustificationOutOfScope: "", + Size: types.Service.String(), + Technology: types.Vault.String(), + Tags: []string{input.NormalizeTag(m.macroState["vault-name"][0])}, + Internet: false, + Machine: types.Virtual.String(), + Encryption: types.Transparent.String(), + Owner: "", + Confidentiality: types.StrictlyConfidential.String(), + Integrity: types.Critical.String(), + Availability: types.Critical.String(), + JustificationCiaRating: "Vault components are rated as 'strictly-confidential'.", + MultiTenant: strings.ToLower(m.macroState["multi-tenant"][0]) == "yes", + Redundant: false, + CustomDevelopedParts: false, + DataAssetsProcessed: []string{"configuration-secrets"}, + DataAssetsStored: nil, + DataFormatsAccepted: nil, + CommunicationLinks: commLinks, + } + if inMemoryUsed { + techAsset.DataAssetsStored = []string{"configuration-secrets"} + } + *changeLogCollector = append(*changeLogCollector, "adding technical asset (including communication links): "+vaultID) + if !dryRun { + 
modelInput.TechnicalAssets[m.macroState["vault-name"][0]+" Vault"] = techAsset + } + } + + vaultEnvID := "vault-environment" + if filesystemUsed { + title := "Vault Environment" + trustBoundary := input.TrustBoundary{ + ID: vaultEnvID, + Description: "Vault Environment", + Type: types.ExecutionEnvironment.String(), + Tags: []string{}, + TechnicalAssetsInside: []string{vaultID, storageID}, + TrustBoundariesNested: nil, + } + *changeLogCollector = append(*changeLogCollector, "adding trust boundary: "+vaultEnvID) + if !dryRun { + modelInput.TrustBoundaries[title] = trustBoundary + } + } + + if m.withinTrustBoundary { + if m.createNewTrustBoundary { + trustBoundaryType := m.macroState["new-trust-boundary-type"][0] + title := "Vault Network" + trustBoundary := input.TrustBoundary{ + ID: "vault-network", + Description: "Vault Network", + Type: trustBoundaryType, + Tags: []string{}, + } + if filesystemUsed { + trustBoundary.TrustBoundariesNested = []string{vaultEnvID} + } else { + trustBoundary.TechnicalAssetsInside = serverSideTechAssets + } + *changeLogCollector = append(*changeLogCollector, "adding trust boundary: vault-network") + if !dryRun { + modelInput.TrustBoundaries[title] = trustBoundary + } + } else { // adding to existing trust boundary + existingTrustBoundaryToAddTo := m.macroState["selected-trust-boundary"][0] + title := parsedModel.TrustBoundaries[existingTrustBoundaryToAddTo].Title + + if filesystemUsed { // ---------------------- nest as execution-environment trust boundary ---------------------- + boundariesNested := make([]string, 0) + if modelInput.TrustBoundaries[title].TrustBoundariesNested != nil { + values := modelInput.TrustBoundaries[title].TrustBoundariesNested + for _, val := range values { + boundariesNested = append(boundariesNested, fmt.Sprintf("%v", val)) + } + } + mergedArrays := make([]string, 0) + for _, val := range boundariesNested { + mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) + } + mergedArrays = 
append(mergedArrays, vaultEnvID) + *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) + if !dryRun { + tb := modelInput.TrustBoundaries[title] + tb.TrustBoundariesNested = mergedArrays + modelInput.TrustBoundaries[title] = tb + } + } else { // ---------------------- place assets inside directly ---------------------- + assetsInside := make([]string, 0) + if modelInput.TrustBoundaries[title].TechnicalAssetsInside != nil { + values := modelInput.TrustBoundaries[title].TechnicalAssetsInside + for _, val := range values { + assetsInside = append(assetsInside, fmt.Sprintf("%v", val)) + } + } + mergedArrays := make([]string, 0) + for _, val := range assetsInside { + mergedArrays = append(mergedArrays, fmt.Sprintf("%v", val)) + } + mergedArrays = append(mergedArrays, serverSideTechAssets...) + *changeLogCollector = append(*changeLogCollector, "filling existing trust boundary: "+existingTrustBoundaryToAddTo) + if !dryRun { + tb := modelInput.TrustBoundaries[title] + tb.TechnicalAssetsInside = mergedArrays + modelInput.TrustBoundaries[title] = tb + } + } + } + } + + return "Changeset valid", true, nil +} diff --git a/pkg/macros/macros.go b/pkg/macros/macros.go new file mode 100644 index 00000000..c1f3bcaa --- /dev/null +++ b/pkg/macros/macros.go @@ -0,0 +1,349 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package macros + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" + "gopkg.in/yaml.v3" +) + +type Macros interface { + GetMacroDetails() MacroDetails + GetNextQuestion(model *types.ParsedModel) (nextQuestion MacroQuestion, err error) + ApplyAnswer(questionID string, answer ...string) (message string, validResult bool, err error) + GoBack() (message string, validResult bool, err error) + GetFinalChangeImpact(modelInput *input.Model, model *types.ParsedModel) (changes 
[]string, message string, validResult bool, err error) + Execute(modelInput *input.Model, model *types.ParsedModel) (message string, validResult bool, err error) +} + +func ListBuiltInMacros() []Macros { + return []Macros{ + NewBuildPipeline(), + NewAddVault(), + NewPrettyPrint(), + newRemoveUnusedTags(), + NewSeedRiskTracking(), + NewSeedTags(), + } +} + +func ListCustomMacros() []Macros { + // TODO: implement + return []Macros{} +} + +func GetMacroByID(id string) (Macros, error) { + builtinMacros := ListBuiltInMacros() + customMacros := ListCustomMacros() + allMacros := append(builtinMacros, customMacros...) + for _, macro := range allMacros { + if macro.GetMacroDetails().ID == id { + return macro, nil + } + } + return nil, errors.New("unknown macro id: " + id) +} + +func ExecuteModelMacro(modelInput *input.Model, inputFile string, parsedModel *types.ParsedModel, macroID string) error { + macros, err := GetMacroByID(macroID) + if err != nil { + return err + } + + macroDetails := macros.GetMacroDetails() + + fmt.Println("Executing model macro:", macroDetails.ID) + fmt.Println() + fmt.Println() + printBorder(len(macroDetails.Title), true) + fmt.Println(macroDetails.Title) + printBorder(len(macroDetails.Title), true) + if len(macroDetails.Description) > 0 { + fmt.Println(macroDetails.Description) + } + fmt.Println() + reader := bufio.NewReader(os.Stdin) + for { + nextQuestion, err := macros.GetNextQuestion(parsedModel) + if err != nil { + return err + } + if nextQuestion.NoMoreQuestions() { + break + } + fmt.Println() + printBorder(len(nextQuestion.Title), false) + fmt.Println(nextQuestion.Title) + printBorder(len(nextQuestion.Title), false) + if len(nextQuestion.Description) > 0 { + fmt.Println(nextQuestion.Description) + } + resultingMultiValueSelection := make([]string, 0) + if nextQuestion.IsValueConstrained() { + if nextQuestion.MultiSelect { + selectedValues := make(map[string]bool) + for { + fmt.Println("Please select (multiple executions possible) from the 
following values (use number to select/deselect):") + fmt.Println(" 0:", "SELECTION PROCESS FINISHED: CONTINUE TO NEXT QUESTION") + for i, val := range nextQuestion.PossibleAnswers { + number := i + 1 + padding, selected := "", " " + if number < 10 { + padding = " " + } + if val, exists := selectedValues[val]; exists && val { + selected = "*" + } + fmt.Println(" "+selected+" "+padding+strconv.Itoa(number)+":", val) + } + fmt.Println() + fmt.Print("Enter number to select/deselect (or 0 when finished): ") + answer, err := reader.ReadString('\n') + // convert CRLF to LF + answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) + if err != nil { + return err + } + if val, err := strconv.Atoi(answer); err == nil { // flip selection + if val == 0 { + for key, selected := range selectedValues { + if selected { + resultingMultiValueSelection = append(resultingMultiValueSelection, key) + } + } + break + } else if val > 0 && val <= len(nextQuestion.PossibleAnswers) { + selectedValues[nextQuestion.PossibleAnswers[val-1]] = !selectedValues[nextQuestion.PossibleAnswers[val-1]] + } + } + } + } else { + fmt.Println("Please choose from the following values (enter value directly or use number):") + for i, val := range nextQuestion.PossibleAnswers { + number := i + 1 + padding := "" + if number < 10 { + padding = " " + } + fmt.Println(" "+padding+strconv.Itoa(number)+":", val) + } + } + } + message := "" + validResult := true + if !nextQuestion.IsValueConstrained() || !nextQuestion.MultiSelect { + fmt.Println() + fmt.Println("Enter your answer (use 'BACK' to go one step back or 'QUIT' to quit without executing the model macro)") + fmt.Print("Answer") + if len(nextQuestion.DefaultAnswer) > 0 { + fmt.Print(" (default '" + nextQuestion.DefaultAnswer + "')") + } + fmt.Print(": ") + answer, err := reader.ReadString('\n') + // convert CRLF to LF + answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) + if err != nil { + return err + } + if len(answer) == 0 && 
len(nextQuestion.DefaultAnswer) > 0 { // accepting the default + answer = nextQuestion.DefaultAnswer + } else if nextQuestion.IsValueConstrained() { // convert number to value + if val, err := strconv.Atoi(answer); err == nil { + if val > 0 && val <= len(nextQuestion.PossibleAnswers) { + answer = nextQuestion.PossibleAnswers[val-1] + } + } + } + if strings.ToLower(answer) == "quit" { + fmt.Println("Quitting without executing the model macro") + return nil + } else if strings.ToLower(answer) == "back" { + message, validResult, _ = macros.GoBack() + } else if len(answer) > 0 { // individual answer + if nextQuestion.IsValueConstrained() { + if !nextQuestion.IsMatchingValueConstraint(answer) { + fmt.Println() + fmt.Println(">>> INVALID <<<") + fmt.Println("Answer does not match any allowed value. Please try again:") + continue + } + } + message, validResult, _ = macros.ApplyAnswer(nextQuestion.ID, answer) + } + } else { + message, validResult, _ = macros.ApplyAnswer(nextQuestion.ID, resultingMultiValueSelection...) 
+ } + if err != nil { + return err + } + if !validResult { + fmt.Println() + fmt.Println(">>> INVALID <<<") + } + fmt.Println(message) + fmt.Println() + } + for { + fmt.Println() + fmt.Println() + fmt.Println("#################################################################") + fmt.Println("Do you want to execute the model macro (updating the model file)?") + fmt.Println("#################################################################") + fmt.Println() + fmt.Println("The following changes will be applied:") + var changes []string + message := "" + validResult := true + + changes, message, validResult, err = macros.GetFinalChangeImpact(modelInput, parsedModel) + if err != nil { + return err + } + for _, change := range changes { + fmt.Println(" -", change) + } + if !validResult { + fmt.Println() + fmt.Println(">>> INVALID <<<") + } + fmt.Println() + fmt.Println(message) + fmt.Println() + fmt.Print("Apply these changes to the model file?\nType Yes or No: ") + answer, err := reader.ReadString('\n') + // convert CRLF to LF + answer = strings.TrimSpace(strings.Replace(answer, "\n", "", -1)) + if err != nil { + return err + } + answer = strings.ToLower(answer) + fmt.Println() + if answer == "yes" || answer == "y" { + message, validResult, err = macros.Execute(modelInput, parsedModel) + if err != nil { + return err + } + if !validResult { + fmt.Println() + fmt.Println(">>> INVALID <<<") + } + fmt.Println(message) + fmt.Println() + backupFilename := inputFile + ".backup" + fmt.Println("Creating backup model file:", backupFilename) // TODO add random files in /dev/shm space? 
+ _, err = copyFile(inputFile, backupFilename) + if err != nil { + return err + } + fmt.Println("Updating model") + yamlBytes, err := yaml.Marshal(modelInput) + if err != nil { + return err + } + /* + yamlBytes = model.ReformatYAML(yamlBytes) + */ + fmt.Println("Writing model file:", inputFile) + err = os.WriteFile(inputFile, yamlBytes, 0400) + if err != nil { + return err + } + fmt.Println("Model file successfully updated") + return nil + } else if answer == "no" || answer == "n" { + fmt.Println("Quitting without executing the model macro") + return nil + } + } +} + +func printBorder(length int, bold bool) { + char := "-" + if bold { + char = "=" + } + for i := 1; i <= length; i++ { + fmt.Print(char) + } + fmt.Println() +} + +func copyFile(src, dst string) (int64, error) { + sourceFileStat, err := os.Stat(src) + if err != nil { + return 0, err + } + + if !sourceFileStat.Mode().IsRegular() { + return 0, fmt.Errorf("%s is not a regular file", src) + } + + source, err := os.Open(filepath.Clean(src)) + if err != nil { + return 0, err + } + defer func() { _ = source.Close() }() + + destination, err := os.Create(filepath.Clean(dst)) + if err != nil { + return 0, err + } + defer func() { _ = destination.Close() }() + nBytes, err := io.Copy(destination, source) + return nBytes, err +} + +type MacroDetails struct { + ID, Title, Description string +} + +type MacroQuestion struct { + ID, Title, Description string + PossibleAnswers []string + MultiSelect bool + DefaultAnswer string +} + +const NoMoreQuestionsID = "" + +func NoMoreQuestions() MacroQuestion { + return MacroQuestion{ + ID: NoMoreQuestionsID, + Title: "", + Description: "", + PossibleAnswers: nil, + MultiSelect: false, + DefaultAnswer: "", + } +} + +func (what MacroQuestion) NoMoreQuestions() bool { + return what.ID == NoMoreQuestionsID +} + +func (what MacroQuestion) IsValueConstrained() bool { + return what.PossibleAnswers != nil && len(what.PossibleAnswers) > 0 +} + +func (what MacroQuestion) 
IsMatchingValueConstraint(answer string) bool { + if what.IsValueConstrained() { + for _, val := range what.PossibleAnswers { + if strings.ToLower(val) == strings.ToLower(answer) { + return true + } + } + return false + } + return true +} diff --git a/pkg/macros/pretty-print-macro.go b/pkg/macros/pretty-print-macro.go new file mode 100644 index 00000000..0415da3f --- /dev/null +++ b/pkg/macros/pretty-print-macro.go @@ -0,0 +1,41 @@ +package macros + +import ( + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" +) + +type prettyPrintMacro struct { +} + +func NewPrettyPrint() *prettyPrintMacro { + return &prettyPrintMacro{} +} + +func (*prettyPrintMacro) GetMacroDetails() MacroDetails { + return MacroDetails{ + ID: "pretty-print", + Title: "Pretty Print", + Description: "This model macro simply reformats the model file in a pretty-print style.", + } +} + +func (*prettyPrintMacro) GetNextQuestion(_ *types.ParsedModel) (nextQuestion MacroQuestion, err error) { + return NoMoreQuestions(), nil +} + +func (*prettyPrintMacro) ApplyAnswer(_ string, _ ...string) (message string, validResult bool, err error) { + return "Answer processed", true, nil +} + +func (*prettyPrintMacro) GoBack() (message string, validResult bool, err error) { + return "Cannot go back further", false, nil +} + +func (*prettyPrintMacro) GetFinalChangeImpact(_ *input.Model, _ *types.ParsedModel) (changes []string, message string, validResult bool, err error) { + return []string{"pretty-printing the model file"}, "Changeset valid", true, err +} + +func (*prettyPrintMacro) Execute(_ *input.Model, _ *types.ParsedModel) (message string, validResult bool, err error) { + return "Model pretty printing successful", true, nil +} diff --git a/pkg/macros/remove-unused-tags-macro.go b/pkg/macros/remove-unused-tags-macro.go new file mode 100644 index 00000000..28c6b3eb --- /dev/null +++ b/pkg/macros/remove-unused-tags-macro.go @@ -0,0 +1,64 @@ +package macros + +import 
( + "github.com/mpvl/unique" + "sort" + "strconv" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" +) + +type removeUnusedTagsMacro struct { +} + +func newRemoveUnusedTags() *removeUnusedTagsMacro { + return &removeUnusedTagsMacro{} +} + +func (*removeUnusedTagsMacro) GetMacroDetails() MacroDetails { + return MacroDetails{ + ID: "remove-unused-tags", + Title: "Remove Unused Tags", + Description: "This model macro simply removes all unused tags from the model file.", + } +} + +func (*removeUnusedTagsMacro) GetNextQuestion(*types.ParsedModel) (nextQuestion MacroQuestion, err error) { + return NoMoreQuestions(), nil +} + +func (*removeUnusedTagsMacro) ApplyAnswer(_ string, _ ...string) (message string, validResult bool, err error) { + return "Answer processed", true, nil +} + +func (*removeUnusedTagsMacro) GoBack() (message string, validResult bool, err error) { + return "Cannot go back further", false, nil +} + +func (*removeUnusedTagsMacro) GetFinalChangeImpact(_ *input.Model, _ *types.ParsedModel) (changes []string, message string, validResult bool, err error) { + return []string{"remove unused tags from the model file"}, "Changeset valid", true, err +} + +func (*removeUnusedTagsMacro) Execute(modelInput *input.Model, parsedModel *types.ParsedModel) (message string, validResult bool, err error) { + modelInput.TagsAvailable = parsedModel.TagsAvailable + for _, asset := range parsedModel.DataAssets { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, asset.Tags...) + } + for _, asset := range parsedModel.TechnicalAssets { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, asset.Tags...) + for _, link := range asset.CommunicationLinks { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, link.Tags...) + } + } + for _, boundary := range parsedModel.TrustBoundaries { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, boundary.Tags...) 
+ } + for _, runtime := range parsedModel.SharedRuntimes { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, runtime.Tags...) + } + count := len(modelInput.TagsAvailable) + unique.Strings(&modelInput.TagsAvailable) + sort.Strings(modelInput.TagsAvailable) + return "Model file removal of " + strconv.Itoa(count-len(modelInput.TagsAvailable)) + " unused tags successful", true, nil +} diff --git a/pkg/macros/seed-risk-tracking-macro.go b/pkg/macros/seed-risk-tracking-macro.go new file mode 100644 index 00000000..e365f2f2 --- /dev/null +++ b/pkg/macros/seed-risk-tracking-macro.go @@ -0,0 +1,63 @@ +package macros + +import ( + "sort" + "strconv" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" +) + +type seedRiskTrackingMacro struct { +} + +func NewSeedRiskTracking() *seedRiskTrackingMacro { + return &seedRiskTrackingMacro{} +} + +func (*seedRiskTrackingMacro) GetMacroDetails() MacroDetails { + return MacroDetails{ + ID: "seed-risk-tracking", + Title: "Seed Risk Tracking", + Description: "This model macro simply seeds the model file with initial risk tracking entries for all untracked risks.", + } +} + +func (*seedRiskTrackingMacro) GetNextQuestion(*types.ParsedModel) (nextQuestion MacroQuestion, err error) { + return NoMoreQuestions(), nil +} + +func (*seedRiskTrackingMacro) ApplyAnswer(_ string, _ ...string) (message string, validResult bool, err error) { + return "Answer processed", true, nil +} + +func (*seedRiskTrackingMacro) GoBack() (message string, validResult bool, err error) { + return "Cannot go back further", false, nil +} + +func (*seedRiskTrackingMacro) GetFinalChangeImpact(_ *input.Model, _ *types.ParsedModel) (changes []string, message string, validResult bool, err error) { + return []string{"seed the model file with with initial risk tracking entries for all untracked risks"}, "Changeset valid", true, err +} + +func (*seedRiskTrackingMacro) Execute(modelInput *input.Model, parsedModel 
*types.ParsedModel) (message string, validResult bool, err error) { + syntheticRiskIDsToCreateTrackingFor := make([]string, 0) + for id, risk := range parsedModel.GeneratedRisksBySyntheticId { + if !risk.IsRiskTracked(parsedModel) { + syntheticRiskIDsToCreateTrackingFor = append(syntheticRiskIDsToCreateTrackingFor, id) + } + } + sort.Strings(syntheticRiskIDsToCreateTrackingFor) + if modelInput.RiskTracking == nil { + modelInput.RiskTracking = make(map[string]input.RiskTracking) + } + for _, id := range syntheticRiskIDsToCreateTrackingFor { + modelInput.RiskTracking[id] = input.RiskTracking{ + Status: types.Unchecked.String(), + Justification: "", + Ticket: "", + Date: "", + CheckedBy: "", + } + } + return "Model file seeding with " + strconv.Itoa(len(syntheticRiskIDsToCreateTrackingFor)) + " initial risk tracking successful", true, nil +} diff --git a/pkg/macros/seed-tags-macro.go b/pkg/macros/seed-tags-macro.go new file mode 100644 index 00000000..88cf50b4 --- /dev/null +++ b/pkg/macros/seed-tags-macro.go @@ -0,0 +1,51 @@ +package macros + +import ( + "github.com/mpvl/unique" + "sort" + "strconv" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" +) + +type seedTagsMacro struct { +} + +func NewSeedTags() *seedTagsMacro { + return &seedTagsMacro{} +} + +func (*seedTagsMacro) GetMacroDetails() MacroDetails { + return MacroDetails{ + ID: "seed-tags", + Title: "Seed Tags", + Description: "This model macro simply seeds the model file with supported tags from all risk rules.", + } +} + +func (*seedTagsMacro) GetNextQuestion(parsedModel *types.ParsedModel) (nextQuestion MacroQuestion, err error) { + return NoMoreQuestions(), nil +} + +func (*seedTagsMacro) ApplyAnswer(_ string, _ ...string) (message string, validResult bool, err error) { + return "Answer processed", true, nil +} + +func (*seedTagsMacro) GoBack() (message string, validResult bool, err error) { + return "Cannot go back further", false, nil +} + +func 
(*seedTagsMacro) GetFinalChangeImpact(_ *input.Model, _ *types.ParsedModel) (changes []string, message string, validResult bool, err error) { + return []string{"seed the model file with supported tags from all risk rules"}, "Changeset valid", true, err +} + +func (*seedTagsMacro) Execute(modelInput *input.Model, parsedModel *types.ParsedModel) (message string, validResult bool, err error) { + modelInput.TagsAvailable = parsedModel.TagsAvailable + for tag := range parsedModel.AllSupportedTags { + modelInput.TagsAvailable = append(modelInput.TagsAvailable, tag) + } + unique.Strings(&modelInput.TagsAvailable) + sort.Strings(modelInput.TagsAvailable) + return "Model file seeding with " + strconv.Itoa(len(parsedModel.AllSupportedTags)) + " tags successful", true, nil +} diff --git a/pkg/model/parse.go b/pkg/model/parse.go new file mode 100644 index 00000000..cc4168c8 --- /dev/null +++ b/pkg/model/parse.go @@ -0,0 +1,787 @@ +package model + +import ( + "errors" + "fmt" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/risks" + "github.com/threagile/threagile/pkg/security/types" +) + +func ParseModel(modelInput *input.Model, builtinRiskRules map[string]risks.RiskRule, customRiskRules map[string]*CustomRisk) (*types.ParsedModel, error) { + businessCriticality, err := types.ParseCriticality(modelInput.BusinessCriticality) + if err != nil { + return nil, errors.New("unknown 'business_criticality' value of application: " + modelInput.BusinessCriticality) + } + + reportDate := time.Now() + if len(modelInput.Date) > 0 { + var parseError error + reportDate, parseError = time.Parse("2006-01-02", modelInput.Date) + if parseError != nil { + return nil, errors.New("unable to parse 'date' value of model file (expected format: '2006-01-02')") + } + } + + parsedModel := types.ParsedModel{ + ThreagileVersion: modelInput.ThreagileVersion, + Title: modelInput.Title, + Author: modelInput.Author, + 
Contributors: modelInput.Contributors, + Date: types.Date{Time: reportDate}, + AppDescription: removePathElementsFromImageFiles(modelInput.AppDescription), + BusinessOverview: removePathElementsFromImageFiles(modelInput.BusinessOverview), + TechnicalOverview: removePathElementsFromImageFiles(modelInput.TechnicalOverview), + BusinessCriticality: businessCriticality, + ManagementSummaryComment: modelInput.ManagementSummaryComment, + SecurityRequirements: modelInput.SecurityRequirements, + Questions: modelInput.Questions, + AbuseCases: modelInput.AbuseCases, + TagsAvailable: lowerCaseAndTrim(modelInput.TagsAvailable), + DiagramTweakNodesep: modelInput.DiagramTweakNodesep, + DiagramTweakRanksep: modelInput.DiagramTweakRanksep, + DiagramTweakEdgeLayout: modelInput.DiagramTweakEdgeLayout, + DiagramTweakSuppressEdgeLabels: modelInput.DiagramTweakSuppressEdgeLabels, + DiagramTweakLayoutLeftToRight: modelInput.DiagramTweakLayoutLeftToRight, + DiagramTweakInvisibleConnectionsBetweenAssets: modelInput.DiagramTweakInvisibleConnectionsBetweenAssets, + DiagramTweakSameRankAssets: modelInput.DiagramTweakSameRankAssets, + } + + parsedModel.CommunicationLinks = make(map[string]types.CommunicationLink) + parsedModel.AllSupportedTags = make(map[string]bool) + parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId = make(map[string][]types.CommunicationLink) + parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId = make(map[string]types.TrustBoundary) + parsedModel.GeneratedRisksByCategory = make(map[string][]types.Risk) + parsedModel.GeneratedRisksBySyntheticId = make(map[string]types.Risk) + + if parsedModel.DiagramTweakNodesep == 0 { + parsedModel.DiagramTweakNodesep = 2 + } + if parsedModel.DiagramTweakRanksep == 0 { + parsedModel.DiagramTweakRanksep = 2 + } + + // Data Assets =============================================================================== + parsedModel.DataAssets = make(map[string]types.DataAsset) + for title, asset := range 
modelInput.DataAssets { + id := fmt.Sprintf("%v", asset.ID) + + usage, err := types.ParseUsage(asset.Usage) + if err != nil { + return nil, errors.New("unknown 'usage' value of data asset '" + title + "': " + asset.Usage) + } + quantity, err := types.ParseQuantity(asset.Quantity) + if err != nil { + return nil, errors.New("unknown 'quantity' value of data asset '" + title + "': " + asset.Quantity) + } + confidentiality, err := types.ParseConfidentiality(asset.Confidentiality) + if err != nil { + return nil, errors.New("unknown 'confidentiality' value of data asset '" + title + "': " + asset.Confidentiality) + } + integrity, err := types.ParseCriticality(asset.Integrity) + if err != nil { + return nil, errors.New("unknown 'integrity' value of data asset '" + title + "': " + asset.Integrity) + } + availability, err := types.ParseCriticality(asset.Availability) + if err != nil { + return nil, errors.New("unknown 'availability' value of data asset '" + title + "': " + asset.Availability) + } + + err = checkIdSyntax(id) + if err != nil { + return nil, err + } + if _, exists := parsedModel.DataAssets[id]; exists { + return nil, errors.New("duplicate id used: " + id) + } + tags, err := parsedModel.CheckTags(lowerCaseAndTrim(asset.Tags), "data asset '"+title+"'") + if err != nil { + return nil, err + } + parsedModel.DataAssets[id] = types.DataAsset{ + Id: id, + Title: title, + Usage: usage, + Description: withDefault(fmt.Sprintf("%v", asset.Description), title), + Quantity: quantity, + Tags: tags, + Origin: fmt.Sprintf("%v", asset.Origin), + Owner: fmt.Sprintf("%v", asset.Owner), + Confidentiality: confidentiality, + Integrity: integrity, + Availability: availability, + JustificationCiaRating: fmt.Sprintf("%v", asset.JustificationCiaRating), + } + } + + // Technical Assets =============================================================================== + parsedModel.TechnicalAssets = make(map[string]types.TechnicalAsset) + for title, asset := range 
modelInput.TechnicalAssets { + id := fmt.Sprintf("%v", asset.ID) + + usage, err := types.ParseUsage(asset.Usage) + if err != nil { + return nil, errors.New("unknown 'usage' value of technical asset '" + title + "': " + asset.Usage) + } + + var dataAssetsStored = make([]string, 0) + if asset.DataAssetsStored != nil { + for _, parsedStoredAssets := range asset.DataAssetsStored { + referencedAsset := fmt.Sprintf("%v", parsedStoredAssets) + if contains(dataAssetsStored, referencedAsset) { + continue + } + + err := parsedModel.CheckDataAssetTargetExists(referencedAsset, "technical asset '"+title+"'") + if err != nil { + return nil, err + } + dataAssetsStored = append(dataAssetsStored, referencedAsset) + } + } + + var dataAssetsProcessed = dataAssetsStored + if asset.DataAssetsProcessed != nil { + for _, parsedProcessedAsset := range asset.DataAssetsProcessed { + referencedAsset := fmt.Sprintf("%v", parsedProcessedAsset) + if contains(dataAssetsProcessed, referencedAsset) { + continue + } + + err := parsedModel.CheckDataAssetTargetExists(referencedAsset, "technical asset '"+title+"'") + if err != nil { + return nil, err + } + dataAssetsProcessed = append(dataAssetsProcessed, referencedAsset) + } + } + + technicalAssetType, err := types.ParseTechnicalAssetType(asset.Type) + if err != nil { + return nil, errors.New("unknown 'type' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Type)) + } + technicalAssetSize, err := types.ParseTechnicalAssetSize(asset.Size) + if err != nil { + return nil, errors.New("unknown 'size' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Size)) + } + technicalAssetTechnology, err := types.ParseTechnicalAssetTechnology(asset.Technology) + if err != nil { + return nil, errors.New("unknown 'technology' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Technology)) + } + encryption, err := types.ParseEncryptionStyle(asset.Encryption) + if err != nil { + return nil, errors.New("unknown 
'encryption' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Encryption)) + } + technicalAssetMachine, err := types.ParseTechnicalAssetMachine(asset.Machine) + if err != nil { + return nil, errors.New("unknown 'machine' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Machine)) + } + confidentiality, err := types.ParseConfidentiality(asset.Confidentiality) + if err != nil { + return nil, errors.New("unknown 'confidentiality' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Confidentiality)) + } + integrity, err := types.ParseCriticality(asset.Integrity) + if err != nil { + return nil, errors.New("unknown 'integrity' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Integrity)) + } + availability, err := types.ParseCriticality(asset.Availability) + if err != nil { + return nil, errors.New("unknown 'availability' value of technical asset '" + title + "': " + fmt.Sprintf("%v", asset.Availability)) + } + + dataFormatsAccepted := make([]types.DataFormat, 0) + if asset.DataFormatsAccepted != nil { + for _, dataFormatName := range asset.DataFormatsAccepted { + dataFormat, err := types.ParseDataFormat(dataFormatName) + if err != nil { + return nil, errors.New("unknown 'data_formats_accepted' value of technical asset '" + title + "': " + fmt.Sprintf("%v", dataFormatName)) + } + dataFormatsAccepted = append(dataFormatsAccepted, dataFormat) + } + } + + communicationLinks := make([]types.CommunicationLink, 0) + if asset.CommunicationLinks != nil { + for commLinkTitle, commLink := range asset.CommunicationLinks { + constraint := true + weight := 1 + var dataAssetsSent []string + var dataAssetsReceived []string + + authentication, err := types.ParseAuthentication(commLink.Authentication) + if err != nil { + return nil, errors.New("unknown 'authentication' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Authentication)) + } + 
authorization, err := types.ParseAuthorization(commLink.Authorization) + if err != nil { + return nil, errors.New("unknown 'authorization' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Authorization)) + } + usage, err := types.ParseUsage(commLink.Usage) + if err != nil { + return nil, errors.New("unknown 'usage' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Usage)) + } + protocol, err := types.ParseProtocol(commLink.Protocol) + if err != nil { + return nil, errors.New("unknown 'protocol' value of technical asset '" + title + "' communication link '" + commLinkTitle + "': " + fmt.Sprintf("%v", commLink.Protocol)) + } + + if commLink.DataAssetsSent != nil { + for _, dataAssetSent := range commLink.DataAssetsSent { + referencedAsset := fmt.Sprintf("%v", dataAssetSent) + if !contains(dataAssetsSent, referencedAsset) { + err := parsedModel.CheckDataAssetTargetExists(referencedAsset, "communication link '"+commLinkTitle+"' of technical asset '"+title+"'") + if err != nil { + return nil, err + } + + dataAssetsSent = append(dataAssetsSent, referencedAsset) + if !contains(dataAssetsProcessed, referencedAsset) { + dataAssetsProcessed = append(dataAssetsProcessed, referencedAsset) + } + } + } + } + + if commLink.DataAssetsReceived != nil { + for _, dataAssetReceived := range commLink.DataAssetsReceived { + referencedAsset := fmt.Sprintf("%v", dataAssetReceived) + if contains(dataAssetsReceived, referencedAsset) { + continue + } + + err := parsedModel.CheckDataAssetTargetExists(referencedAsset, "communication link '"+commLinkTitle+"' of technical asset '"+title+"'") + if err != nil { + return nil, err + } + dataAssetsReceived = append(dataAssetsReceived, referencedAsset) + + if !contains(dataAssetsProcessed, referencedAsset) { + dataAssetsProcessed = append(dataAssetsProcessed, referencedAsset) + } + } + } + + if 
commLink.DiagramTweakWeight > 0 { + weight = commLink.DiagramTweakWeight + } + + constraint = !commLink.DiagramTweakConstraint + + dataFlowTitle := fmt.Sprintf("%v", commLinkTitle) + if err != nil { + return nil, err + } + commLinkId, err := createDataFlowId(id, dataFlowTitle) + if err != nil { + return nil, err + } + tags, err := parsedModel.CheckTags(lowerCaseAndTrim(commLink.Tags), "communication link '"+commLinkTitle+"' of technical asset '"+title+"'") + if err != nil { + return nil, err + } + commLink := types.CommunicationLink{ + Id: commLinkId, + SourceId: id, + TargetId: commLink.Target, + Title: dataFlowTitle, + Description: withDefault(commLink.Description, dataFlowTitle), + Protocol: protocol, + Authentication: authentication, + Authorization: authorization, + Usage: usage, + Tags: tags, + VPN: commLink.VPN, + IpFiltered: commLink.IpFiltered, + Readonly: commLink.Readonly, + DataAssetsSent: dataAssetsSent, + DataAssetsReceived: dataAssetsReceived, + DiagramTweakWeight: weight, + DiagramTweakConstraint: constraint, + } + communicationLinks = append(communicationLinks, commLink) + // track all comm links + parsedModel.CommunicationLinks[commLink.Id] = commLink + // keep track of map of *all* comm links mapped by target-id (to be able to look up "who is calling me" kind of things) + parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[commLink.TargetId] = append( + parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[commLink.TargetId], commLink) + } + } + + err = checkIdSyntax(id) + if err != nil { + return nil, err + } + if _, exists := parsedModel.TechnicalAssets[id]; exists { + return nil, errors.New("duplicate id used: " + id) + } + tags, err := parsedModel.CheckTags(lowerCaseAndTrim(asset.Tags), "technical asset '"+title+"'") + if err != nil { + return nil, err + } + parsedModel.TechnicalAssets[id] = types.TechnicalAsset{ + Id: id, + Usage: usage, + Title: title, //fmt.Sprintf("%v", asset["title"]), + Description: 
withDefault(fmt.Sprintf("%v", asset.Description), title), + Type: technicalAssetType, + Size: technicalAssetSize, + Technology: technicalAssetTechnology, + Tags: tags, + Machine: technicalAssetMachine, + Internet: asset.Internet, + Encryption: encryption, + MultiTenant: asset.MultiTenant, + Redundant: asset.Redundant, + CustomDevelopedParts: asset.CustomDevelopedParts, + UsedAsClientByHuman: asset.UsedAsClientByHuman, + OutOfScope: asset.OutOfScope, + JustificationOutOfScope: fmt.Sprintf("%v", asset.JustificationOutOfScope), + Owner: fmt.Sprintf("%v", asset.Owner), + Confidentiality: confidentiality, + Integrity: integrity, + Availability: availability, + JustificationCiaRating: fmt.Sprintf("%v", asset.JustificationCiaRating), + DataAssetsProcessed: dataAssetsProcessed, + DataAssetsStored: dataAssetsStored, + DataFormatsAccepted: dataFormatsAccepted, + CommunicationLinks: communicationLinks, + DiagramTweakOrder: asset.DiagramTweakOrder, + } + } + + // If CIA is lower than that of its data assets, it is implicitly set to the highest CIA value of its data assets + for id, techAsset := range parsedModel.TechnicalAssets { + dataAssetConfidentiality := techAsset.HighestConfidentiality(&parsedModel) + if techAsset.Confidentiality < dataAssetConfidentiality { + techAsset.Confidentiality = dataAssetConfidentiality + } + + dataAssetIntegrity := techAsset.HighestIntegrity(&parsedModel) + if techAsset.Integrity < dataAssetIntegrity { + techAsset.Integrity = dataAssetIntegrity + } + + dataAssetAvailability := techAsset.HighestAvailability(&parsedModel) + if techAsset.Availability < dataAssetAvailability { + techAsset.Availability = dataAssetAvailability + } + + parsedModel.TechnicalAssets[id] = techAsset + } + + // A target of a communication link implicitly processes all data assets that are sent to or received by that target + for id, techAsset := range parsedModel.TechnicalAssets { + for _, commLink := range techAsset.CommunicationLinks { + if commLink.TargetId == id { + 
continue + } + targetTechAsset := parsedModel.TechnicalAssets[commLink.TargetId] + dataAssetsProcessedByTarget := targetTechAsset.DataAssetsProcessed + for _, dataAssetSent := range commLink.DataAssetsSent { + if !contains(dataAssetsProcessedByTarget, dataAssetSent) { + dataAssetsProcessedByTarget = append(dataAssetsProcessedByTarget, dataAssetSent) + } + } + for _, dataAssetReceived := range commLink.DataAssetsReceived { + if !contains(dataAssetsProcessedByTarget, dataAssetReceived) { + dataAssetsProcessedByTarget = append(dataAssetsProcessedByTarget, dataAssetReceived) + } + } + targetTechAsset.DataAssetsProcessed = dataAssetsProcessedByTarget + parsedModel.TechnicalAssets[commLink.TargetId] = targetTechAsset + } + } + + // Trust Boundaries =============================================================================== + checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries := make(map[string]bool) + parsedModel.TrustBoundaries = make(map[string]types.TrustBoundary) + for title, boundary := range modelInput.TrustBoundaries { + id := fmt.Sprintf("%v", boundary.ID) + + var technicalAssetsInside = make([]string, 0) + if boundary.TechnicalAssetsInside != nil { + parsedInsideAssets := boundary.TechnicalAssetsInside + technicalAssetsInside = make([]string, len(parsedInsideAssets)) + for i, parsedInsideAsset := range parsedInsideAssets { + technicalAssetsInside[i] = fmt.Sprintf("%v", parsedInsideAsset) + _, found := parsedModel.TechnicalAssets[technicalAssetsInside[i]] + if !found { + return nil, errors.New("missing referenced technical asset " + technicalAssetsInside[i] + " at trust boundary '" + title + "'") + } + if checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries[technicalAssetsInside[i]] == true { + return nil, errors.New("referenced technical asset " + technicalAssetsInside[i] + " at trust boundary '" + title + "' is modeled in multiple trust boundaries") + } + checklistToAvoidAssetBeingModeledInMultipleTrustBoundaries[technicalAssetsInside[i]] = 
true + //fmt.Println("asset "+technicalAssetsInside[i]+" at i="+strconv.Itoa(i)) + } + } + + var trustBoundariesNested = make([]string, 0) + if boundary.TrustBoundariesNested != nil { + parsedNestedBoundaries := boundary.TrustBoundariesNested + trustBoundariesNested = make([]string, len(parsedNestedBoundaries)) + for i, parsedNestedBoundary := range parsedNestedBoundaries { + trustBoundariesNested[i] = fmt.Sprintf("%v", parsedNestedBoundary) + } + } + + trustBoundaryType, err := types.ParseTrustBoundary(boundary.Type) + if err != nil { + return nil, errors.New("unknown 'type' of trust boundary '" + title + "': " + fmt.Sprintf("%v", boundary.Type)) + } + tags, err := parsedModel.CheckTags(lowerCaseAndTrim(boundary.Tags), "trust boundary '"+title+"'") + trustBoundary := types.TrustBoundary{ + Id: id, + Title: title, //fmt.Sprintf("%v", boundary["title"]), + Description: withDefault(fmt.Sprintf("%v", boundary.Description), title), + Type: trustBoundaryType, + Tags: tags, + TechnicalAssetsInside: technicalAssetsInside, + TrustBoundariesNested: trustBoundariesNested, + } + err = checkIdSyntax(id) + if err != nil { + return nil, err + } + if _, exists := parsedModel.TrustBoundaries[id]; exists { + return nil, errors.New("duplicate id used: " + id) + } + parsedModel.TrustBoundaries[id] = trustBoundary + for _, technicalAsset := range trustBoundary.TechnicalAssetsInside { + parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[technicalAsset] = trustBoundary + //fmt.Println("Asset "+technicalAsset+" is directly in trust boundary "+trustBoundary.Id) + } + } + err = parsedModel.CheckNestedTrustBoundariesExisting() + if err != nil { + return nil, err + } + + // Shared Runtime =============================================================================== + parsedModel.SharedRuntimes = make(map[string]types.SharedRuntime) + for title, inputRuntime := range modelInput.SharedRuntimes { + id := fmt.Sprintf("%v", inputRuntime.ID) + + var technicalAssetsRunning = 
make([]string, 0) + if inputRuntime.TechnicalAssetsRunning != nil { + parsedRunningAssets := inputRuntime.TechnicalAssetsRunning + technicalAssetsRunning = make([]string, len(parsedRunningAssets)) + for i, parsedRunningAsset := range parsedRunningAssets { + assetId := fmt.Sprintf("%v", parsedRunningAsset) + err := parsedModel.CheckTechnicalAssetExists(assetId, "shared runtime '"+title+"'", false) + if err != nil { + return nil, err + } + technicalAssetsRunning[i] = assetId + } + } + tags, err := parsedModel.CheckTags(lowerCaseAndTrim(inputRuntime.Tags), "shared runtime '"+title+"'") + if err != nil { + return nil, err + } + sharedRuntime := types.SharedRuntime{ + Id: id, + Title: title, //fmt.Sprintf("%v", boundary["title"]), + Description: withDefault(fmt.Sprintf("%v", inputRuntime.Description), title), + Tags: tags, + TechnicalAssetsRunning: technicalAssetsRunning, + } + err = checkIdSyntax(id) + if err != nil { + return nil, err + } + if _, exists := parsedModel.SharedRuntimes[id]; exists { + return nil, errors.New("duplicate id used: " + id) + } + parsedModel.SharedRuntimes[id] = sharedRuntime + } + + parsedModel.BuiltInRiskCategories = make(map[string]types.RiskCategory) + for _, rule := range builtinRiskRules { + category := rule.Category() + parsedModel.BuiltInRiskCategories[category.Id] = category + } + + parsedModel.IndividualRiskCategories = make(map[string]types.RiskCategory) + for _, rule := range customRiskRules { + parsedModel.IndividualRiskCategories[rule.Category.Id] = rule.Category + } + + // Individual Risk Categories (just used as regular risk categories) =============================================================================== + // parsedModel.IndividualRiskCategories = make(map[string]types.RiskCategory) + for title, individualCategory := range modelInput.IndividualRiskCategories { + id := fmt.Sprintf("%v", individualCategory.ID) + + function, err := types.ParseRiskFunction(individualCategory.Function) + if err != nil { + return nil, 
errors.New("unknown 'function' value of individual risk category '" + title + "': " + fmt.Sprintf("%v", individualCategory.Function)) + } + stride, err := types.ParseSTRIDE(individualCategory.STRIDE) + if err != nil { + return nil, errors.New("unknown 'stride' value of individual risk category '" + title + "': " + fmt.Sprintf("%v", individualCategory.STRIDE)) + } + + cat := types.RiskCategory{ + Id: id, + Title: title, + Description: withDefault(fmt.Sprintf("%v", individualCategory.Description), title), + Impact: fmt.Sprintf("%v", individualCategory.Impact), + ASVS: fmt.Sprintf("%v", individualCategory.ASVS), + CheatSheet: fmt.Sprintf("%v", individualCategory.CheatSheet), + Action: fmt.Sprintf("%v", individualCategory.Action), + Mitigation: fmt.Sprintf("%v", individualCategory.Mitigation), + Check: fmt.Sprintf("%v", individualCategory.Check), + DetectionLogic: fmt.Sprintf("%v", individualCategory.DetectionLogic), + RiskAssessment: fmt.Sprintf("%v", individualCategory.RiskAssessment), + FalsePositives: fmt.Sprintf("%v", individualCategory.FalsePositives), + Function: function, + STRIDE: stride, + ModelFailurePossibleReason: individualCategory.ModelFailurePossibleReason, + CWE: individualCategory.CWE, + } + err = checkIdSyntax(id) + if err != nil { + return nil, err + } + if _, exists := parsedModel.IndividualRiskCategories[id]; exists { + return nil, errors.New("duplicate id used: " + id) + } + parsedModel.IndividualRiskCategories[id] = cat + + // NOW THE INDIVIDUAL RISK INSTANCES: + //individualRiskInstances := make([]model.Risk, 0) + if individualCategory.RisksIdentified != nil { // TODO: also add syntax checks of input YAML when linked asset is not found or when synthetic-id is already used... 
+ for title, individualRiskInstance := range individualCategory.RisksIdentified { + var mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId string + var dataBreachProbability types.DataBreachProbability + var dataBreachTechnicalAssetIDs []string + severity, err := types.ParseRiskSeverity(individualRiskInstance.Severity) + if err != nil { + return nil, errors.New("unknown 'severity' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", individualRiskInstance.Severity)) + } + exploitationLikelihood, err := types.ParseRiskExploitationLikelihood(individualRiskInstance.ExploitationLikelihood) + if err != nil { + return nil, errors.New("unknown 'exploitation_likelihood' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", individualRiskInstance.ExploitationLikelihood)) + } + exploitationImpact, err := types.ParseRiskExploitationImpact(individualRiskInstance.ExploitationImpact) + if err != nil { + return nil, errors.New("unknown 'exploitation_impact' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", individualRiskInstance.ExploitationImpact)) + } + + if len(individualRiskInstance.MostRelevantDataAsset) > 0 { + mostRelevantDataAssetId = fmt.Sprintf("%v", individualRiskInstance.MostRelevantDataAsset) + err := parsedModel.CheckDataAssetTargetExists(mostRelevantDataAssetId, "individual risk '"+title+"'") + if err != nil { + return nil, err + } + } + + if len(individualRiskInstance.MostRelevantTechnicalAsset) > 0 { + mostRelevantTechnicalAssetId = fmt.Sprintf("%v", individualRiskInstance.MostRelevantTechnicalAsset) + err := parsedModel.CheckTechnicalAssetExists(mostRelevantTechnicalAssetId, "individual risk '"+title+"'", false) + if err != nil { + return nil, err + } + } + + if len(individualRiskInstance.MostRelevantCommunicationLink) > 0 { + mostRelevantCommunicationLinkId = fmt.Sprintf("%v", 
individualRiskInstance.MostRelevantCommunicationLink) + err := parsedModel.CheckCommunicationLinkExists(mostRelevantCommunicationLinkId, "individual risk '"+title+"'") + if err != nil { + return nil, err + } + } + + if len(individualRiskInstance.MostRelevantTrustBoundary) > 0 { + mostRelevantTrustBoundaryId = fmt.Sprintf("%v", individualRiskInstance.MostRelevantTrustBoundary) + err := parsedModel.CheckTrustBoundaryExists(mostRelevantTrustBoundaryId, "individual risk '"+title+"'") + if err != nil { + return nil, err + } + } + + if len(individualRiskInstance.MostRelevantSharedRuntime) > 0 { + mostRelevantSharedRuntimeId = fmt.Sprintf("%v", individualRiskInstance.MostRelevantSharedRuntime) + err := parsedModel.CheckSharedRuntimeExists(mostRelevantSharedRuntimeId, "individual risk '"+title+"'") + if err != nil { + return nil, err + } + } + + dataBreachProbability, err = types.ParseDataBreachProbability(individualRiskInstance.DataBreachProbability) + if err != nil { + return nil, errors.New("unknown 'data_breach_probability' value of individual risk instance '" + title + "': " + fmt.Sprintf("%v", individualRiskInstance.DataBreachProbability)) + } + + if individualRiskInstance.DataBreachTechnicalAssets != nil { + dataBreachTechnicalAssetIDs = make([]string, len(individualRiskInstance.DataBreachTechnicalAssets)) + for i, parsedReferencedAsset := range individualRiskInstance.DataBreachTechnicalAssets { + assetId := fmt.Sprintf("%v", parsedReferencedAsset) + err := parsedModel.CheckTechnicalAssetExists(assetId, "data breach technical assets of individual risk '"+title+"'", false) + if err != nil { + return nil, err + } + dataBreachTechnicalAssetIDs[i] = assetId + } + } + + parsedModel.GeneratedRisksByCategory[cat.Id] = append(parsedModel.GeneratedRisksByCategory[cat.Id], types.Risk{ + SyntheticId: createSyntheticId(cat.Id, mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId), + 
Title: fmt.Sprintf("%v", title), + CategoryId: cat.Id, + Severity: severity, + ExploitationLikelihood: exploitationLikelihood, + ExploitationImpact: exploitationImpact, + MostRelevantDataAssetId: mostRelevantDataAssetId, + MostRelevantTechnicalAssetId: mostRelevantTechnicalAssetId, + MostRelevantCommunicationLinkId: mostRelevantCommunicationLinkId, + MostRelevantTrustBoundaryId: mostRelevantTrustBoundaryId, + MostRelevantSharedRuntimeId: mostRelevantSharedRuntimeId, + DataBreachProbability: dataBreachProbability, + DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs, + }) + } + } + } + + // Risk Tracking =============================================================================== + parsedModel.RiskTracking = make(map[string]types.RiskTracking) + for syntheticRiskId, riskTracking := range modelInput.RiskTracking { + justification := fmt.Sprintf("%v", riskTracking.Justification) + checkedBy := fmt.Sprintf("%v", riskTracking.CheckedBy) + ticket := fmt.Sprintf("%v", riskTracking.Ticket) + var date time.Time + if len(riskTracking.Date) > 0 { + var parseError error + date, parseError = time.Parse("2006-01-02", riskTracking.Date) + if parseError != nil { + return nil, errors.New("unable to parse 'date' of risk tracking '" + syntheticRiskId + "': " + riskTracking.Date) + } + } + + status, err := types.ParseRiskStatus(riskTracking.Status) + if err != nil { + return nil, errors.New("unknown 'status' value of risk tracking '" + syntheticRiskId + "': " + riskTracking.Status) + } + + tracking := types.RiskTracking{ + SyntheticRiskId: strings.TrimSpace(syntheticRiskId), + Justification: justification, + CheckedBy: checkedBy, + Ticket: ticket, + Date: types.Date{Time: date}, + Status: status, + } + + parsedModel.RiskTracking[syntheticRiskId] = tracking + } + + // ====================== model consistency check (linking) + for _, technicalAsset := range parsedModel.TechnicalAssets { + for _, commLink := range technicalAsset.CommunicationLinks { + err := 
parsedModel.CheckTechnicalAssetExists(commLink.TargetId, "communication link '"+commLink.Title+"' of technical asset '"+technicalAsset.Title+"'", false) + if err != nil { + return nil, err + } + } + } + + /* + data, _ := json.MarshalIndent(parsedModel, "", " ") + _ = os.WriteFile(filepath.Join("all.json"), data, 0644) + */ + + /** + inYamlData, _ := yaml.Marshal(modelInput) + _ = os.WriteFile(filepath.Join("in.yaml"), inYamlData, 0644) + + inJsonData, _ := json.MarshalIndent(modelInput, "", " ") + _ = os.WriteFile(filepath.Join("in.json"), inJsonData, 0644) + + outYamlData, _ := yaml.Marshal(parsedModel) + _ = os.WriteFile(filepath.Join("out.yaml"), outYamlData, 0644) + + outJsonData, _ := json.MarshalIndent(parsedModel, "", " ") + _ = os.WriteFile(filepath.Join("out.json"), outJsonData, 0644) + /**/ + + return &parsedModel, nil +} + +func checkIdSyntax(id string) error { + validIdSyntax := regexp.MustCompile(`^[a-zA-Z0-9\-]+$`) + if !validIdSyntax.MatchString(id) { + return errors.New("invalid id syntax used (only letters, numbers, and hyphen allowed): " + id) + } + return nil +} + +func createDataFlowId(sourceAssetId, title string) (string, error) { + reg, err := regexp.Compile("[^A-Za-z0-9]+") + if err != nil { + return "", err + } + return sourceAssetId + ">" + strings.Trim(reg.ReplaceAllString(strings.ToLower(title), "-"), "- "), nil +} + +func createSyntheticId(categoryId string, + mostRelevantDataAssetId, mostRelevantTechnicalAssetId, mostRelevantCommunicationLinkId, mostRelevantTrustBoundaryId, mostRelevantSharedRuntimeId string) string { + result := categoryId + if len(mostRelevantTechnicalAssetId) > 0 { + result += "@" + mostRelevantTechnicalAssetId + } + if len(mostRelevantCommunicationLinkId) > 0 { + result += "@" + mostRelevantCommunicationLinkId + } + if len(mostRelevantTrustBoundaryId) > 0 { + result += "@" + mostRelevantTrustBoundaryId + } + if len(mostRelevantSharedRuntimeId) > 0 { + result += "@" + mostRelevantSharedRuntimeId + } + if 
len(mostRelevantDataAssetId) > 0 { + result += "@" + mostRelevantDataAssetId + } + return result +} + +// in order to prevent Path-Traversal like stuff... +func removePathElementsFromImageFiles(overview input.Overview) input.Overview { + for i := range overview.Images { + newValue := make(map[string]string) + for file, desc := range overview.Images[i] { + newValue[filepath.Base(file)] = desc + } + overview.Images[i] = newValue + } + return overview +} + +func withDefault(value string, defaultWhenEmpty string) string { + trimmed := strings.TrimSpace(value) + if len(trimmed) > 0 && trimmed != "" { + return trimmed + } + return strings.TrimSpace(defaultWhenEmpty) +} + +func lowerCaseAndTrim(tags []string) []string { + for i := range tags { + tags[i] = strings.ToLower(strings.TrimSpace(tags[i])) + } + return tags +} + +func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} diff --git a/pkg/model/parse_test.go b/pkg/model/parse_test.go new file mode 100644 index 00000000..4718e3dc --- /dev/null +++ b/pkg/model/parse_test.go @@ -0,0 +1,188 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package model + +import ( + "testing" + + "github.com/google/uuid" + + "github.com/stretchr/testify/assert" + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/risks" + "github.com/threagile/threagile/pkg/security/types" +) + +func TestDefaultInputNotFail(t *testing.T) { + parsedModel, err := ParseModel(createInputModel(make(map[string]input.TechnicalAsset), make(map[string]input.DataAsset)), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + + assert.NoError(t, err) + assert.NotNil(t, parsedModel) +} + +func TestInferConfidentiality_NotSet_NoOthers_ExpectTODO(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + 
// TODO: rename test and check if everyone agree that by default it should be public if there are no other assets + + assert.NoError(t, err) +} + +func TestInferConfidentiality_ExpectHighestConfidentiality(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + daConfidentialConfidentiality := createDataAsset(types.Confidential, types.Critical, types.Critical) + da[daConfidentialConfidentiality.ID] = daConfidentialConfidentiality + + daRestrictedConfidentiality := createDataAsset(types.Restricted, types.Important, types.Important) + da[daRestrictedConfidentiality.ID] = daRestrictedConfidentiality + + daPublicConfidentiality := createDataAsset(types.Public, types.Archive, types.Archive) + da[daPublicConfidentiality.ID] = daPublicConfidentiality + + taWithConfidentialConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithConfidentialConfidentialityDataAsset.DataAssetsProcessed = append(taWithConfidentialConfidentialityDataAsset.DataAssetsProcessed, daConfidentialConfidentiality.ID) + ta[taWithConfidentialConfidentialityDataAsset.ID] = taWithConfidentialConfidentialityDataAsset + + taWithRestrictedConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithRestrictedConfidentialityDataAsset.DataAssetsProcessed = append(taWithRestrictedConfidentialityDataAsset.DataAssetsProcessed, daRestrictedConfidentiality.ID) + ta[taWithRestrictedConfidentialityDataAsset.ID] = taWithRestrictedConfidentialityDataAsset + + taWithPublicConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithPublicConfidentialityDataAsset.DataAssetsProcessed = append(taWithPublicConfidentialityDataAsset.DataAssetsProcessed, daPublicConfidentiality.ID) + ta[taWithPublicConfidentialityDataAsset.ID] = taWithPublicConfidentialityDataAsset + + parsedModel, err := ParseModel(createInputModel(ta, da), 
make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + + assert.NoError(t, err) + assert.Equal(t, types.Confidential, parsedModel.TechnicalAssets[taWithConfidentialConfidentialityDataAsset.ID].Confidentiality) + assert.Equal(t, types.Restricted, parsedModel.TechnicalAssets[taWithRestrictedConfidentialityDataAsset.ID].Confidentiality) + assert.Equal(t, types.Internal, parsedModel.TechnicalAssets[taWithPublicConfidentialityDataAsset.ID].Confidentiality) +} + +func TestInferIntegrity_NotSet_NoOthers_ExpectTODO(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + // TODO: rename test and check if everyone agree that by default it should be public if there are no other assets + + assert.NoError(t, err) +} + +func TestInferIntegrity_ExpectHighestIntegrity(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + daCriticalIntegrity := createDataAsset(types.Confidential, types.Critical, types.Critical) + da[daCriticalIntegrity.ID] = daCriticalIntegrity + + daImportantIntegrity := createDataAsset(types.Restricted, types.Important, types.Important) + da[daImportantIntegrity.ID] = daImportantIntegrity + + daArchiveIntegrity := createDataAsset(types.Public, types.Archive, types.Archive) + da[daArchiveIntegrity.ID] = daArchiveIntegrity + + taWithCriticalIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithCriticalIntegrityDataAsset.DataAssetsProcessed = append(taWithCriticalIntegrityDataAsset.DataAssetsProcessed, daCriticalIntegrity.ID) + ta[taWithCriticalIntegrityDataAsset.ID] = taWithCriticalIntegrityDataAsset + + taWithImportantIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithImportantIntegrityDataAsset.DataAssetsProcessed = 
append(taWithImportantIntegrityDataAsset.DataAssetsProcessed, daImportantIntegrity.ID) + ta[taWithImportantIntegrityDataAsset.ID] = taWithImportantIntegrityDataAsset + + taWithArchiveIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithArchiveIntegrityDataAsset.DataAssetsProcessed = append(taWithArchiveIntegrityDataAsset.DataAssetsProcessed, daArchiveIntegrity.ID) + ta[taWithArchiveIntegrityDataAsset.ID] = taWithArchiveIntegrityDataAsset + + parsedModel, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + + assert.NoError(t, err) + assert.Equal(t, types.Critical, parsedModel.TechnicalAssets[taWithCriticalIntegrityDataAsset.ID].Integrity) + assert.Equal(t, types.Important, parsedModel.TechnicalAssets[taWithImportantIntegrityDataAsset.ID].Integrity) + assert.Equal(t, types.Operational, parsedModel.TechnicalAssets[taWithArchiveIntegrityDataAsset.ID].Integrity) +} + +func TestInferAvailability_NotSet_NoOthers_ExpectTODO(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + + assert.NoError(t, err) +} + +func TestInferAvailability_ExpectHighestAvailability(t *testing.T) { + ta := make(map[string]input.TechnicalAsset) + da := make(map[string]input.DataAsset) + + daCriticalAvailability := createDataAsset(types.Confidential, types.Critical, types.Critical) + da[daCriticalAvailability.ID] = daCriticalAvailability + + daImportantAvailability := createDataAsset(types.Restricted, types.Important, types.Important) + da[daImportantAvailability.ID] = daImportantAvailability + + daArchiveAvailability := createDataAsset(types.Public, types.Archive, types.Archive) + da[daArchiveAvailability.ID] = daArchiveAvailability + + taWithCriticalAvailabilityDataAsset := createTechnicalAsset(types.Internal, 
types.Operational, types.Operational) + taWithCriticalAvailabilityDataAsset.DataAssetsProcessed = append(taWithCriticalAvailabilityDataAsset.DataAssetsProcessed, daCriticalAvailability.ID) + ta[taWithCriticalAvailabilityDataAsset.ID] = taWithCriticalAvailabilityDataAsset + + taWithImportantAvailabilityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithImportantAvailabilityDataAsset.DataAssetsProcessed = append(taWithImportantAvailabilityDataAsset.DataAssetsProcessed, daImportantAvailability.ID) + ta[taWithImportantAvailabilityDataAsset.ID] = taWithImportantAvailabilityDataAsset + + taWithArchiveAvailabilityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational) + taWithArchiveAvailabilityDataAsset.DataAssetsProcessed = append(taWithArchiveAvailabilityDataAsset.DataAssetsProcessed, daArchiveAvailability.ID) + ta[taWithArchiveAvailabilityDataAsset.ID] = taWithArchiveAvailabilityDataAsset + + parsedModel, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk)) + + assert.NoError(t, err) + assert.Equal(t, types.Critical, parsedModel.TechnicalAssets[taWithCriticalAvailabilityDataAsset.ID].Availability) + assert.Equal(t, types.Important, parsedModel.TechnicalAssets[taWithImportantAvailabilityDataAsset.ID].Availability) + assert.Equal(t, types.Operational, parsedModel.TechnicalAssets[taWithArchiveAvailabilityDataAsset.ID].Availability) +} + +func createInputModel(technicalAssets map[string]input.TechnicalAsset, dataAssets map[string]input.DataAsset) *input.Model { + return &input.Model{ + TechnicalAssets: technicalAssets, + DataAssets: dataAssets, + + // set some dummy values to bypass validation + BusinessCriticality: "archive", + } +} + +func createTechnicalAsset(confidentiality types.Confidentiality, integrity types.Criticality, availability types.Criticality) input.TechnicalAsset { + return input.TechnicalAsset{ + ID: uuid.New().String(), + 
// those values are required to bypass validation + Usage: "business", + Type: "process", + Size: "system", + Technology: "unknown-technology", + Encryption: "none", + Machine: "virtual", + Confidentiality: confidentiality.String(), + Integrity: integrity.String(), + Availability: availability.String(), + } +} + +func createDataAsset(confidentiality types.Confidentiality, integrity types.Criticality, availability types.Criticality) input.DataAsset { + return input.DataAsset{ + ID: uuid.New().String(), + Usage: "business", + Quantity: "few", + Confidentiality: confidentiality.String(), + Integrity: integrity.String(), + Availability: availability.String(), + } +} diff --git a/pkg/model/read.go b/pkg/model/read.go new file mode 100644 index 00000000..814cf8f5 --- /dev/null +++ b/pkg/model/read.go @@ -0,0 +1,164 @@ +package model + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/risks" + "github.com/threagile/threagile/pkg/security/types" +) + +type progressReporter interface { + Info(a ...any) + Warn(a ...any) + Error(a ...any) +} + +type ReadResult struct { + ModelInput *input.Model + ParsedModel *types.ParsedModel + IntroTextRAA string + BuiltinRiskRules map[string]risks.RiskRule + CustomRiskRules map[string]*CustomRisk +} + +// TODO: consider about splitting this function into smaller ones for better reusability +func ReadAndAnalyzeModel(config common.Config, progressReporter progressReporter) (*ReadResult, error) { + progressReporter.Info("Writing into output directory:", config.OutputFolder) + progressReporter.Info("Parsing model:", config.InputFile) + + builtinRiskRules := make(map[string]risks.RiskRule) + for _, rule := range risks.GetBuiltInRiskRules() { + builtinRiskRules[rule.Category().Id] = rule + } + customRiskRules := LoadCustomRiskRules(config.RiskRulesPlugins, progressReporter) + + modelInput := 
new(input.Model).Defaults() + loadError := modelInput.Load(config.InputFile) + if loadError != nil { + return nil, fmt.Errorf("unable to load model yaml: %v", loadError) + } + + parsedModel, parseError := ParseModel(modelInput, builtinRiskRules, customRiskRules) + if parseError != nil { + return nil, fmt.Errorf("unable to parse model yaml: %v", parseError) + } + + introTextRAA := applyRAA(parsedModel, config.BinFolder, config.RAAPlugin, progressReporter) + + applyRiskGeneration(parsedModel, customRiskRules, builtinRiskRules, + config.SkipRiskRules, progressReporter) + err := parsedModel.ApplyWildcardRiskTrackingEvaluation(config.IgnoreOrphanedRiskTracking, progressReporter) + if err != nil { + return nil, fmt.Errorf("unable to apply wildcard risk tracking evaluation: %v", err) + } + + err = parsedModel.CheckRiskTracking(config.IgnoreOrphanedRiskTracking, progressReporter) + if err != nil { + return nil, fmt.Errorf("unable to check risk tracking: %v", err) + } + + return &ReadResult{ + ModelInput: modelInput, + ParsedModel: parsedModel, + IntroTextRAA: introTextRAA, + BuiltinRiskRules: builtinRiskRules, + CustomRiskRules: customRiskRules, + }, nil +} + +func applyRisk(parsedModel *types.ParsedModel, rule risks.RiskRule, skippedRules *map[string]bool) { + id := rule.Category().Id + _, ok := (*skippedRules)[id] + + if ok { + fmt.Printf("Skipping risk rule %q\n", rule.Category().Id) + delete(*skippedRules, rule.Category().Id) + } else { + parsedModel.AddToListOfSupportedTags(rule.SupportedTags()) + generatedRisks := rule.GenerateRisks(parsedModel) + if generatedRisks != nil { + if len(generatedRisks) > 0 { + parsedModel.GeneratedRisksByCategory[rule.Category().Id] = generatedRisks + } + } else { + fmt.Printf("Failed to generate risks for %q\n", id) + } + } +} + +// TODO: refactor skipRiskRules to be a string array instead of a comma-separated string +func applyRiskGeneration(parsedModel *types.ParsedModel, customRiskRules map[string]*CustomRisk, + builtinRiskRules 
map[string]risks.RiskRule, + skipRiskRules string, + progressReporter progressReporter) { + progressReporter.Info("Applying risk generation") + + skippedRules := make(map[string]bool) + if len(skipRiskRules) > 0 { + for _, id := range strings.Split(skipRiskRules, ",") { + skippedRules[id] = true + } + } + + for _, rule := range builtinRiskRules { + applyRisk(parsedModel, rule, &skippedRules) + } + + // NOW THE CUSTOM RISK RULES (if any) + for id, customRule := range customRiskRules { + _, ok := skippedRules[id] + if ok { + progressReporter.Info("Skipping custom risk rule:", id) + delete(skippedRules, id) + } else { + progressReporter.Info("Executing custom risk rule:", id) + parsedModel.AddToListOfSupportedTags(customRule.Tags) + customRisks := customRule.GenerateRisks(parsedModel) + if len(customRisks) > 0 { + parsedModel.GeneratedRisksByCategory[customRule.Category.Id] = customRisks + } + + progressReporter.Info("Added custom risks:", len(customRisks)) + } + } + + if len(skippedRules) > 0 { + keys := make([]string, 0) + for k := range skippedRules { + keys = append(keys, k) + } + if len(keys) > 0 { + progressReporter.Info("Unknown risk rules to skip:", keys) + } + } + + // save also in map keyed by synthetic risk-id + for _, category := range types.SortedRiskCategories(parsedModel) { + someRisks := types.SortedRisksOfCategory(parsedModel, category) + for _, risk := range someRisks { + parsedModel.GeneratedRisksBySyntheticId[strings.ToLower(risk.SyntheticId)] = risk + } + } +} + +func applyRAA(parsedModel *types.ParsedModel, binFolder, raaPlugin string, progressReporter progressReporter) string { + progressReporter.Info("Applying RAA calculation:", raaPlugin) + + runner, loadError := new(runner).Load(filepath.Join(binFolder, raaPlugin)) + if loadError != nil { + progressReporter.Warn(fmt.Sprintf("WARNING: raa %q not loaded: %v\n", raaPlugin, loadError)) + return "" + } + + runError := runner.Run(parsedModel, parsedModel) + if runError != nil { + 
progressReporter.Warn(fmt.Sprintf("WARNING: raa %q not applied: %v\n", raaPlugin, runError)) + return "" + } + + return runner.ErrorOutput +} diff --git a/pkg/model/rules.go b/pkg/model/rules.go new file mode 100644 index 00000000..c50b4050 --- /dev/null +++ b/pkg/model/rules.go @@ -0,0 +1,62 @@ +package model + +import ( + "fmt" + "log" + "strings" + + "github.com/threagile/threagile/pkg/security/types" +) + +type CustomRisk struct { + ID string + Category types.RiskCategory + Tags []string + Runner *runner +} + +func (r *CustomRisk) GenerateRisks(m *types.ParsedModel) []types.Risk { + if r.Runner == nil { + return nil + } + + risks := make([]types.Risk, 0) + runError := r.Runner.Run(m, &risks, "-generate-risks") + if runError != nil { + log.Fatalf("Failed to generate risks for custom risk rule %q: %v\n", r.Runner.Filename, runError) + } + + return risks +} + +func LoadCustomRiskRules(pluginFiles []string, reporter progressReporter) map[string]*CustomRisk { + customRiskRuleList := make([]string, 0) + customRiskRules := make(map[string]*CustomRisk) + if len(pluginFiles) > 0 { + reporter.Info("Loading custom risk rules:", strings.Join(pluginFiles, ", ")) + + for _, pluginFile := range pluginFiles { + if len(pluginFile) > 0 { + runner, loadError := new(runner).Load(pluginFile) + if loadError != nil { + reporter.Error(fmt.Sprintf("WARNING: Custom risk rule %q not loaded: %v\n", pluginFile, loadError)) + } + + risk := new(CustomRisk) + runError := runner.Run(nil, &risk, "-get-info") + if runError != nil { + reporter.Error(fmt.Sprintf("WARNING: Failed to get info for custom risk rule %q: %v\n", pluginFile, runError)) + } + + risk.Runner = runner + customRiskRules[risk.ID] = risk + customRiskRuleList = append(customRiskRuleList, risk.ID) + reporter.Info("Custom risk rule loaded:", risk.ID) + } + } + + reporter.Info("Loaded custom risk rules:", strings.Join(customRiskRuleList, ", ")) + } + + return customRiskRules +} diff --git a/pkg/model/runner.go b/pkg/model/runner.go 
new file mode 100644 index 00000000..3cd9c211 --- /dev/null +++ b/pkg/model/runner.go @@ -0,0 +1,91 @@ +// TODO: consider moving to internal +package model + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" +) + +type runner struct { + Filename string + Parameters []string + In any + Out any + ErrorOutput string +} + +func (p *runner) Load(filename string) (*runner, error) { + *p = runner{ + Filename: filename, + } + + fileInfo, statError := os.Stat(filename) + if statError != nil { + return p, statError + } + + if !fileInfo.Mode().IsRegular() { + return p, fmt.Errorf("run %q is not a regular file", filename) + } + + return p, nil +} + +func (p *runner) Run(in any, out any, parameters ...string) error { + *p = runner{ + Filename: p.Filename, + Parameters: parameters, + In: in, + Out: out, + } + + plugin := exec.Command(p.Filename, p.Parameters...) // #nosec G204 + stdin, stdinError := plugin.StdinPipe() + if stdinError != nil { + return stdinError + } + defer func() { _ = stdin.Close() }() + + var stdoutBuf bytes.Buffer + plugin.Stdout = &stdoutBuf + + var stderrBuf bytes.Buffer + plugin.Stderr = &stderrBuf + + startError := plugin.Start() + if startError != nil { + return startError + } + + inData, inError := json.MarshalIndent(p.In, "", " ") + if inError != nil { + return inError + } + + _, writeError := stdin.Write(inData) + if writeError != nil { + return writeError + } + + inCloseError := stdin.Close() + if inCloseError != nil { + return inCloseError + } + + waitError := plugin.Wait() + p.ErrorOutput = stderrBuf.String() + if waitError != nil { + return fmt.Errorf("%v: %v", waitError, p.ErrorOutput) + } + + stdout := stdoutBuf.Bytes() + unmarshalError := json.Unmarshal(stdout, &p.Out) + if unmarshalError != nil { + return unmarshalError + } + + return nil +} diff --git a/pkg/report/colors.go b/pkg/report/colors.go new file mode 100644 index 00000000..e8f3fdb2 --- /dev/null +++ b/pkg/report/colors.go @@ -0,0 +1,472 @@ +package report + +import ( 
+ "encoding/hex" + "fmt" + + "github.com/jung-kurt/gofpdf" + "github.com/threagile/threagile/pkg/security/types" +) + +const ( + Amber = "#AF780E" + Green = "#008000" + Blue = "#000080" + DarkBlue = "#000060" + Black = "#000000" + Gray = "#444444" + LightGray = "#666666" + MiddleLightGray = "#999999" + MoreLightGray = "#D2D2D2" + VeryLightGray = "#E5E5E5" + ExtremeLightGray = "#F6F6F6" + Pink = "#F987C5" + LightPink = "#FFE7EF" + Red = "#CC0000" + OutOfScopeFancy = "#D5D7FF" + CustomDevelopedParts = "#FFFC97" + ExtremeLightBlue = "#DDFFFF" + LightBlue = "#77FFFF" + Brown = "#8C4C17" +) + +func DarkenHexColor(hexString string) string { + colorBytes, _ := hex.DecodeString(hexString[1:]) + adjusted := make([]byte, 3) + for i := 0; i < 3; i++ { + if colorBytes[i] > 0x22 { + adjusted[i] = colorBytes[i] - 0x20 + } else { + adjusted[i] = 0x00 + } + } + return "#" + hex.EncodeToString(adjusted) +} + +func BrightenHexColor(hexString string) string { + colorBytes, _ := hex.DecodeString(hexString[1:]) + adjusted := make([]byte, 3) + for i := 0; i < 3; i++ { + if colorBytes[i] < 0xDD { + adjusted[i] = colorBytes[i] + 0x20 + } else { + adjusted[i] = 0xFF + } + } + return "#" + hex.EncodeToString(adjusted) +} + +func ColorCriticalRisk(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(255, 38, 0) +} +func RgbHexColorCriticalRisk() string { + return "#FF2600" +} + +func ColorHighRisk(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(160, 40, 30) +} +func RgbHexColorHighRisk() string { + return "#A0281E" +} + +func ColorElevatedRisk(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(255, 142, 0) +} +func RgbHexColorElevatedRisk() string { + return "#FF8E00" +} + +func ColorMediumRisk(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(200, 120, 50) +} +func RgbHexColorMediumRisk() string { + return "#C87832" +} + +func ColorLowRisk(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(35, 70, 95) +} +func RgbHexColorLowRisk() string { + return "#23465F" +} + +func ColorOutOfScope(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(127, 127, 127) +} +func 
RgbHexColorOutOfScope() string { + return "#7F7F7F" +} + +func ColorRiskStatusUnchecked(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(256, 0, 0) +} +func RgbHexColorRiskStatusUnchecked() string { + return "#FF0000" +} + +func ColorRiskStatusMitigated(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(0, 143, 0) +} +func RgbHexColorRiskStatusMitigated() string { + return "#008F00" +} + +func ColorRiskStatusInProgress(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(0, 0, 256) +} +func RgbHexColorRiskStatusInProgress() string { + return "#0000FF" +} + +func ColorRiskStatusAccepted(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(255, 64, 255) +} +func RgbHexColorRiskStatusAccepted() string { + return "#FF40FF" +} + +func ColorRiskStatusInDiscussion(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(256, 147, 0) +} +func RgbHexColorRiskStatusInDiscussion() string { + return "#FF9300" +} + +func ColorRiskStatusFalsePositive(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(102, 102, 102) +} +func RgbHexColorRiskStatusFalsePositive() string { + return "#666666" +} + +func ColorTwilight(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(58, 82, 200) +} +func RgbHexColorTwilight() string { + return "#3A52C8" +} + +func ColorBusiness(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(83, 27, 147) +} +func RgbHexColorBusiness() string { + return "#531B93" +} + +func ColorArchitecture(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(0, 84, 147) +} +func RgbHexColorArchitecture() string { + return "#005493" +} + +func ColorDevelopment(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(222, 146, 35) +} +func RgbHexColorDevelopment() string { + return "#DE9223" +} + +func ColorOperation(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(148, 127, 80) +} +func RgbHexColorOperation() string { + return "#947F50" +} + +func ColorModelFailure(pdf *gofpdf.Fpdf) { + pdf.SetTextColor(148, 82, 0) +} +func RgbHexColorModelFailure() string { + return "#945200" +} + +func determineArrowLineStyle(cl types.CommunicationLink) string { + if len(cl.DataAssetsSent) == 0 && len(cl.DataAssetsReceived) == 0 { + 
return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... + } + if cl.Usage == types.DevOps { + return "dashed" + } + return "solid" +} + +// Pen Widths: + +func determineArrowPenWidth(cl types.CommunicationLink, parsedModel *types.ParsedModel) string { + if determineArrowColor(cl, parsedModel) == Pink { + return fmt.Sprintf("%f", 3.0) + } + if determineArrowColor(cl, parsedModel) != Black { + return fmt.Sprintf("%f", 2.5) + } + return fmt.Sprintf("%f", 1.5) +} + +func determineLabelColor(cl types.CommunicationLink, parsedModel *types.ParsedModel) string { + // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here + /* + if dataFlow.Protocol.IsEncrypted() { + return Gray + } else {*/ + // check for red + for _, sentDataAsset := range cl.DataAssetsSent { + if parsedModel.DataAssets[sentDataAsset].Integrity == types.MissionCritical { + return Red + } + } + for _, receivedDataAsset := range cl.DataAssetsReceived { + if parsedModel.DataAssets[receivedDataAsset].Integrity == types.MissionCritical { + return Red + } + } + // check for amber + for _, sentDataAsset := range cl.DataAssetsSent { + if parsedModel.DataAssets[sentDataAsset].Integrity == types.Critical { + return Amber + } + } + for _, receivedDataAsset := range cl.DataAssetsReceived { + if parsedModel.DataAssets[receivedDataAsset].Integrity == types.Critical { + return Amber + } + } + // default + return Gray + +} + +// pink when model forgery attempt (i.e. 
nothing being sent and received) + +func determineArrowColor(cl types.CommunicationLink, parsedModel *types.ParsedModel) string { + // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here + if len(cl.DataAssetsSent) == 0 && len(cl.DataAssetsReceived) == 0 || + cl.Protocol == types.UnknownProtocol { + return Pink // pink, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... + } + if cl.Usage == types.DevOps { + return MiddleLightGray + } else if cl.VPN { + return DarkBlue + } else if cl.IpFiltered { + return Brown + } + // check for red + for _, sentDataAsset := range cl.DataAssetsSent { + if parsedModel.DataAssets[sentDataAsset].Confidentiality == types.StrictlyConfidential { + return Red + } + } + for _, receivedDataAsset := range cl.DataAssetsReceived { + if parsedModel.DataAssets[receivedDataAsset].Confidentiality == types.StrictlyConfidential { + return Red + } + } + // check for amber + for _, sentDataAsset := range cl.DataAssetsSent { + if parsedModel.DataAssets[sentDataAsset].Confidentiality == types.Confidential { + return Amber + } + } + for _, receivedDataAsset := range cl.DataAssetsReceived { + if parsedModel.DataAssets[receivedDataAsset].Confidentiality == types.Confidential { + return Amber + } + } + // default + return Black + /* + } else if dataFlow.Authentication != NoneAuthentication { + return Black + } else { + // check for red + for _, sentDataAsset := range dataFlow.DataAssetsSent { // first check if any red? + if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == MissionCritical { + return Red + } + } + for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // first check if any red? 
+ if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == MissionCritical { + return Red + } + } + // check for amber + for _, sentDataAsset := range dataFlow.DataAssetsSent { // then check if any amber? + if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == Critical { + return Amber + } + } + for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // then check if any amber? + if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == Critical { + return Amber + } + } + return Black + } + */ +} + +// red when >= confidential data stored in unencrypted technical asset + +func determineTechnicalAssetLabelColor(ta types.TechnicalAsset, model *types.ParsedModel) string { + // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here + // Check for red + if ta.Integrity == types.MissionCritical { + return Red + } + for _, storedDataAsset := range ta.DataAssetsStored { + if model.DataAssets[storedDataAsset].Integrity == types.MissionCritical { + return Red + } + } + for _, processedDataAsset := range ta.DataAssetsProcessed { + if model.DataAssets[processedDataAsset].Integrity == types.MissionCritical { + return Red + } + } + // Check for amber + if ta.Integrity == types.Critical { + return Amber + } + for _, storedDataAsset := range ta.DataAssetsStored { + if model.DataAssets[storedDataAsset].Integrity == types.Critical { + return Amber + } + } + for _, processedDataAsset := range ta.DataAssetsProcessed { + if model.DataAssets[processedDataAsset].Integrity == types.Critical { + return Amber + } + } + return Black + /* + if what.Encrypted { + return Black + } else { + if what.Confidentiality == StrictlyConfidential { + return Red + } + for _, storedDataAsset := range what.DataAssetsStored { + if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == StrictlyConfidential { + return Red + } + } + if what.Confidentiality == Confidential { + return Amber + } + for _, storedDataAsset := range 
what.DataAssetsStored { + if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == Confidential { + return Amber + } + } + return Black + } + */ +} + +// red when mission-critical integrity, but still unauthenticated (non-readonly) channels access it +// amber when critical integrity, but still unauthenticated (non-readonly) channels access it +// pink when model forgery attempt (i.e. nothing being processed) +func determineShapeBorderColor(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string { + // Check for red + if ta.Confidentiality == types.StrictlyConfidential { + return Red + } + for _, processedDataAsset := range ta.DataAssetsProcessed { + if parsedModel.DataAssets[processedDataAsset].Confidentiality == types.StrictlyConfidential { + return Red + } + } + // Check for amber + if ta.Confidentiality == types.Confidential { + return Amber + } + for _, processedDataAsset := range ta.DataAssetsProcessed { + if parsedModel.DataAssets[processedDataAsset].Confidentiality == types.Confidential { + return Amber + } + } + return Black + /* + if what.Integrity == MissionCritical { + for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] { + if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication { + return Red + } + } + } + + if what.Integrity == Critical { + for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] { + if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication { + return Amber + } + } + } + + if len(what.DataAssetsProcessed) == 0 && len(what.DataAssetsStored) == 0 { + return Pink // pink, because it's strange when too many technical assets process no data... some are ok, but many in a diagram is a sign of model forgery... + } + + return Black + */ +} + +// dotted when model forgery attempt (i.e. 
nothing being processed or stored) + +func determineShapeBorderLineStyle(ta types.TechnicalAsset) string { + if len(ta.DataAssetsProcessed) == 0 || ta.OutOfScope { + return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery... + } + return "solid" +} + +// 3 when redundant + +func determineShapePeripheries(ta types.TechnicalAsset) int { + if ta.Redundant { + return 2 + } + return 1 +} + +func determineShapeStyle(ta types.TechnicalAsset) string { + return "filled" +} + +func determineShapeFillColor(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string { + fillColor := VeryLightGray + if len(ta.DataAssetsProcessed) == 0 && len(ta.DataAssetsStored) == 0 || + ta.Technology == types.UnknownTechnology { + fillColor = LightPink // lightPink, because it's strange when too many technical assets process no data... some ok, but many in a diagram ist a sign of model forgery... + } else if len(ta.CommunicationLinks) == 0 && len(parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[ta.Id]) == 0 { + fillColor = LightPink + } else if ta.Internet { + fillColor = ExtremeLightBlue + } else if ta.OutOfScope { + fillColor = OutOfScopeFancy + } else if ta.CustomDevelopedParts { + fillColor = CustomDevelopedParts + } + switch ta.Machine { + case types.Physical: + fillColor = DarkenHexColor(fillColor) + case types.Container: + fillColor = BrightenHexColor(fillColor) + case types.Serverless: + fillColor = BrightenHexColor(BrightenHexColor(fillColor)) + case types.Virtual: + } + return fillColor +} + +func determineShapeBorderPenWidth(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string { + if determineShapeBorderColor(ta, parsedModel) == Pink { + return fmt.Sprintf("%f", 3.5) + } + if determineShapeBorderColor(ta, parsedModel) != Black { + return fmt.Sprintf("%f", 3.0) + } + return fmt.Sprintf("%f", 2.0) +} diff --git a/report/excel.go 
b/pkg/report/excel.go similarity index 73% rename from report/excel.go rename to pkg/report/excel.go index 3159b04b..38bccb94 100644 --- a/report/excel.go +++ b/pkg/report/excel.go @@ -1,42 +1,45 @@ package report import ( - "github.com/threagile/threagile/colors" - "github.com/threagile/threagile/model" - "github.com/xuri/excelize/v2" + "fmt" "sort" "strconv" "strings" -) -var excelRow int + "github.com/threagile/threagile/pkg/security/types" + "github.com/xuri/excelize/v2" +) -func WriteRisksExcelToFile(filename string) { - excelRow = 0 +func WriteRisksExcelToFile(parsedModel *types.ParsedModel, filename string) error { + excelRow := 0 excel := excelize.NewFile() - sheetName := model.ParsedModelRoot.Title + sheetName := parsedModel.Title err := excel.SetDocProps(&excelize.DocProperties{ Category: "Threat Model Risks Summary", ContentStatus: "Final", - Creator: model.ParsedModelRoot.Author.Name, + Creator: parsedModel.Author.Name, Description: sheetName + " via Threagile", Identifier: "xlsx", Keywords: "Threat Model", - LastModifiedBy: model.ParsedModelRoot.Author.Name, + LastModifiedBy: parsedModel.Author.Name, Revision: "0", Subject: sheetName, Title: sheetName, Language: "en-US", Version: "1.0.0", }) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set doc properties: %w", err) + } sheetIndex, _ := excel.NewSheet(sheetName) _ = excel.DeleteSheet("Sheet1") orientation := "landscape" size := 9 err = excel.SetPageLayout(sheetName, &excelize.PageLayoutOptions{Orientation: &orientation, Size: &size}) // A4 - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set page layout: %w", err) + } err = excel.SetHeaderFooter(sheetName, &excelize.HeaderFooterOptions{ DifferentFirst: false, @@ -45,9 +48,11 @@ func WriteRisksExcelToFile(filename string) { OddFooter: "&C&F", EvenHeader: "&L&P", EvenFooter: "&L&D&R&T", - FirstHeader: `&Threat Model &"-,` + model.ParsedModelRoot.Title + `"Bold&"-,Regular"Risks Summary+000A&D`, + FirstHeader: `&Threat 
Model &"-,` + parsedModel.Title + `"Bold&"-,Regular"Risks Summary+000A&D`, }) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set header/footer: %w", err) + } err = excel.SetCellValue(sheetName, "A1", "Severity") err = excel.SetCellValue(sheetName, "B1", "Likelihood") @@ -90,17 +95,19 @@ func WriteRisksExcelToFile(filename string) { err = excel.SetColWidth(sheetName, "R", "R", 18) err = excel.SetColWidth(sheetName, "S", "S", 20) err = excel.SetColWidth(sheetName, "T", "T", 20) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set column width: %w", err) + } - // styleSeverityCriticalBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorCriticalRisk() + `","size":12,"bold":true}}`) + // styleSeverityCriticalBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorCriticalRisk() + `","size":12,"bold":true}}`) styleSeverityCriticalBold, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorCriticalRisk(), + Color: RgbHexColorCriticalRisk(), Size: 12, Bold: true, }, }) - // styleSeverityCriticalCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorCriticalRisk() + `","size":12}}`) + // styleSeverityCriticalCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorCriticalRisk() + `","size":12}}`) styleSeverityCriticalCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -108,19 +115,19 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorCriticalRisk(), + Color: RgbHexColorCriticalRisk(), Size: 12, }, }) - // styleSeverityHighBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorHighRisk() + `","size":12,"bold":true}}`) + // styleSeverityHighBold, err := 
excel.NewStyle(`{"font":{"color":"` + RgbHexColorHighRisk() + `","size":12,"bold":true}}`) styleSeverityHighBold, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorHighRisk(), + Color: RgbHexColorHighRisk(), Size: 12, Bold: true, }, }) - // styleSeverityHighCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorHighRisk() + `","size":12}}`) + // styleSeverityHighCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorHighRisk() + `","size":12}}`) styleSeverityHighCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -128,19 +135,19 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorHighRisk(), + Color: RgbHexColorHighRisk(), Size: 12, }, }) - // styleSeverityElevatedBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorElevatedRisk() + `","size":12,"bold":true}}`) + // styleSeverityElevatedBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorElevatedRisk() + `","size":12,"bold":true}}`) styleSeverityElevatedBold, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorElevatedRisk(), + Color: RgbHexColorElevatedRisk(), Size: 12, Bold: true, }, }) - // styleSeverityElevatedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorElevatedRisk() + `","size":12}}`) + // styleSeverityElevatedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorElevatedRisk() + `","size":12}}`) styleSeverityElevatedCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -148,19 +155,19 @@ 
func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorElevatedRisk(), + Color: RgbHexColorElevatedRisk(), Size: 12, }, }) - // styleSeverityMediumBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorMediumRisk() + `","size":12,"bold":true}}`) + // styleSeverityMediumBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorMediumRisk() + `","size":12,"bold":true}}`) styleSeverityMediumBold, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorMediumRisk(), + Color: RgbHexColorMediumRisk(), Size: 12, Bold: true, }, }) - // styleSeverityMediumCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorMediumRisk() + `","size":12}}`) + // styleSeverityMediumCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorMediumRisk() + `","size":12}}`) styleSeverityMediumCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -168,19 +175,19 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorMediumRisk(), + Color: RgbHexColorMediumRisk(), Size: 12, }, }) - // styleSeverityLowBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorLowRisk() + `","size":12,"bold":true}}`) + // styleSeverityLowBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorLowRisk() + `","size":12,"bold":true}}`) styleSeverityLowBold, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorLowRisk(), + Color: RgbHexColorLowRisk(), Size: 12, Bold: true, }, }) - // styleSeverityLowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorLowRisk() + `","size":12}}`) + // 
styleSeverityLowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorLowRisk() + `","size":12}}`) styleSeverityLowCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -188,12 +195,12 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorLowRisk(), + Color: RgbHexColorLowRisk(), Size: 12, }, }) - // styleRedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorRiskStatusUnchecked() + `","size":12}}`) + // styleRedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorRiskStatusUnchecked() + `","size":12}}`) styleRedCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -201,11 +208,11 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorLowRisk(), + Color: RgbHexColorLowRisk(), Size: 12, }, }) - // styleGreenCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorRiskStatusMitigated() + `","size":12}}`) + // styleGreenCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorRiskStatusMitigated() + `","size":12}}`) styleGreenCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -213,11 +220,11 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusMitigated(), + Color: RgbHexColorRiskStatusMitigated(), Size: 12, }, }) - // styleBlueCenter, err := 
excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusInProgress() + `","size":12}}`) + // styleBlueCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusInProgress() + `","size":12}}`) styleBlueCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -225,11 +232,11 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusInProgress(), + Color: RgbHexColorRiskStatusInProgress(), Size: 12, }, }) - // styleYellowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusAccepted() + `","size":12}}`) + // styleYellowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusAccepted() + `","size":12}}`) styleYellowCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -237,11 +244,11 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusAccepted(), + Color: RgbHexColorRiskStatusAccepted(), Size: 12, }, }) - // styleOrangeCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusInDiscussion() + `","size":12}}`) + // styleOrangeCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusInDiscussion() + `","size":12}}`) styleOrangeCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -249,11 +256,11 @@ func 
WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusInDiscussion(), + Color: RgbHexColorRiskStatusInDiscussion(), Size: 12, }, }) - // styleGrayCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusFalsePositive() + `","size":12}}`) + // styleGrayCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusFalsePositive() + `","size":12}}`) styleGrayCenter, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ Horizontal: "center", @@ -261,7 +268,7 @@ func WriteRisksExcelToFile(filename string) { WrapText: false, }, Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusFalsePositive(), + Color: RgbHexColorRiskStatusFalsePositive(), Size: 12, }, }) @@ -308,10 +315,10 @@ func WriteRisksExcelToFile(filename string) { Size: 10, }, }) - // styleGraySmall, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorOutOfScope() + `","size":10}}`) + // styleGraySmall, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorOutOfScope() + `","size":10}}`) styleGraySmall, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorOutOfScope(), + Color: RgbHexColorOutOfScope(), Size: 10, }, }) @@ -328,41 +335,41 @@ func WriteRisksExcelToFile(filename string) { Bold: true, }, }) - // styleMitigation, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorRiskStatusMitigated() + `","size":10}}`) + // styleMitigation, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorRiskStatusMitigated() + `","size":10}}`) styleMitigation, err := excel.NewStyle(&excelize.Style{ Font: &excelize.Font{ - Color: colors.RgbHexColorRiskStatusMitigated(), + Color: RgbHexColorRiskStatusMitigated(), Size: 10, }, }) excelRow++ // as we have a header line - for _, category := range 
model.SortedRiskCategories() { - risks := model.SortedRisksOfCategory(category) + for _, category := range types.SortedRiskCategories(parsedModel) { + risks := types.SortedRisksOfCategory(parsedModel, category) for _, risk := range risks { excelRow++ - techAsset := model.ParsedModelRoot.TechnicalAssets[risk.MostRelevantTechnicalAssetId] - commLink := model.CommunicationLinks[risk.MostRelevantCommunicationLinkId] - riskTrackingStatus := risk.GetRiskTrackingStatusDefaultingUnchecked() + techAsset := parsedModel.TechnicalAssets[risk.MostRelevantTechnicalAssetId] + commLink := parsedModel.CommunicationLinks[risk.MostRelevantCommunicationLinkId] + riskTrackingStatus := risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) // content err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(excelRow), risk.Severity.Title()) err = excel.SetCellValue(sheetName, "B"+strconv.Itoa(excelRow), risk.ExploitationLikelihood.Title()) err = excel.SetCellValue(sheetName, "C"+strconv.Itoa(excelRow), risk.ExploitationImpact.Title()) - err = excel.SetCellValue(sheetName, "D"+strconv.Itoa(excelRow), risk.Category.STRIDE.Title()) - err = excel.SetCellValue(sheetName, "E"+strconv.Itoa(excelRow), risk.Category.Function.Title()) - err = excel.SetCellValue(sheetName, "F"+strconv.Itoa(excelRow), "CWE-"+strconv.Itoa(risk.Category.CWE)) - err = excel.SetCellValue(sheetName, "G"+strconv.Itoa(excelRow), risk.Category.Title) + err = excel.SetCellValue(sheetName, "D"+strconv.Itoa(excelRow), category.STRIDE.Title()) + err = excel.SetCellValue(sheetName, "E"+strconv.Itoa(excelRow), category.Function.Title()) + err = excel.SetCellValue(sheetName, "F"+strconv.Itoa(excelRow), "CWE-"+strconv.Itoa(category.CWE)) + err = excel.SetCellValue(sheetName, "G"+strconv.Itoa(excelRow), category.Title) err = excel.SetCellValue(sheetName, "H"+strconv.Itoa(excelRow), techAsset.Title) err = excel.SetCellValue(sheetName, "I"+strconv.Itoa(excelRow), commLink.Title) err = excel.SetCellFloat(sheetName, 
"J"+strconv.Itoa(excelRow), techAsset.RAA, 0, 32) err = excel.SetCellValue(sheetName, "K"+strconv.Itoa(excelRow), removeFormattingTags(risk.Title)) - err = excel.SetCellValue(sheetName, "L"+strconv.Itoa(excelRow), risk.Category.Action) - err = excel.SetCellValue(sheetName, "M"+strconv.Itoa(excelRow), risk.Category.Mitigation) - err = excel.SetCellValue(sheetName, "N"+strconv.Itoa(excelRow), risk.Category.Check) + err = excel.SetCellValue(sheetName, "L"+strconv.Itoa(excelRow), category.Action) + err = excel.SetCellValue(sheetName, "M"+strconv.Itoa(excelRow), category.Mitigation) + err = excel.SetCellValue(sheetName, "N"+strconv.Itoa(excelRow), category.Check) err = excel.SetCellValue(sheetName, "O"+strconv.Itoa(excelRow), risk.SyntheticId) err = excel.SetCellValue(sheetName, "P"+strconv.Itoa(excelRow), riskTrackingStatus.Title()) - if riskTrackingStatus != model.Unchecked { - riskTracking := risk.GetRiskTracking() + if riskTrackingStatus != types.Unchecked { + riskTracking := risk.GetRiskTracking(parsedModel) err = excel.SetCellValue(sheetName, "Q"+strconv.Itoa(excelRow), riskTracking.Justification) if !riskTracking.Date.IsZero() { err = excel.SetCellValue(sheetName, "R"+strconv.Itoa(excelRow), riskTracking.Date.Format("2006-01-02")) @@ -373,19 +380,19 @@ func WriteRisksExcelToFile(filename string) { // styles if riskTrackingStatus.IsStillAtRisk() { switch risk.Severity { - case model.CriticalSeverity: + case types.CriticalSeverity: err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityCriticalCenter) err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityCriticalBold) - case model.HighSeverity: + case types.HighSeverity: err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityHighCenter) err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityHighBold) - case 
model.ElevatedSeverity: + case types.ElevatedSeverity: err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityElevatedCenter) err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityElevatedBold) - case model.MediumSeverity: + case types.MediumSeverity: err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityMediumCenter) err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityMediumBold) - case model.LowSeverity: + case types.LowSeverity: err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityLowCenter) err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityLowBold) } @@ -395,17 +402,17 @@ func WriteRisksExcelToFile(filename string) { } styleFromRiskTracking := styleBlackCenter switch riskTrackingStatus { - case model.Unchecked: + case types.Unchecked: styleFromRiskTracking = styleRedCenter - case model.Mitigated: + case types.Mitigated: styleFromRiskTracking = styleGreenCenter - case model.InProgress: + case types.InProgress: styleFromRiskTracking = styleBlueCenter - case model.Accepted: + case types.Accepted: styleFromRiskTracking = styleYellowCenter - case model.InDiscussion: + case types.InDiscussion: styleFromRiskTracking = styleOrangeCenter - case model.FalsePositive: + case types.FalsePositive: styleFromRiskTracking = styleGrayCenter default: styleFromRiskTracking = styleBlackCenter @@ -421,7 +428,9 @@ func WriteRisksExcelToFile(filename string) { err = excel.SetCellStyle(sheetName, "R"+strconv.Itoa(excelRow), "R"+strconv.Itoa(excelRow), styleBlackCenter) err = excel.SetCellStyle(sheetName, "S"+strconv.Itoa(excelRow), "S"+strconv.Itoa(excelRow), styleBlackCenter) err = excel.SetCellStyle(sheetName, "T"+strconv.Itoa(excelRow), "T"+strconv.Itoa(excelRow), 
styleBlackLeft) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set cell style: %w", err) + } } } @@ -447,39 +456,48 @@ func WriteRisksExcelToFile(filename string) { }) err = excel.SetCellStyle(sheetName, "A1", "T1", styleHeadCenter) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set cell style: %w", err) + } excel.SetActiveSheet(sheetIndex) err = excel.SaveAs(filename) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to save excel file: %w", err) + } + return nil } -func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sortedTagsAvailable) == 0 is: write a hint in the execel that no tags are used - excelRow = 0 +func WriteTagsExcelToFile(parsedModel *types.ParsedModel, filename string) error { // TODO: eventually when len(sortedTagsAvailable) == 0 is: write a hint in the Excel that no tags are used + excelRow := 0 excel := excelize.NewFile() - sheetName := model.ParsedModelRoot.Title + sheetName := parsedModel.Title err := excel.SetDocProps(&excelize.DocProperties{ Category: "Tag Matrix", ContentStatus: "Final", - Creator: model.ParsedModelRoot.Author.Name, + Creator: parsedModel.Author.Name, Description: sheetName + " via Threagile", Identifier: "xlsx", Keywords: "Tag Matrix", - LastModifiedBy: model.ParsedModelRoot.Author.Name, + LastModifiedBy: parsedModel.Author.Name, Revision: "0", Subject: sheetName, Title: sheetName, Language: "en-US", Version: "1.0.0", }) - checkErr(err) + if err != nil { + return err + } sheetIndex, _ := excel.NewSheet(sheetName) _ = excel.DeleteSheet("Sheet1") orientation := "landscape" size := 9 err = excel.SetPageLayout(sheetName, &excelize.PageLayoutOptions{Orientation: &orientation, Size: &size}) // A4 - checkErr(err) + if err != nil { + return err + } err = excel.SetHeaderFooter(sheetName, &excelize.HeaderFooterOptions{ DifferentFirst: false, @@ -488,12 +506,14 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted OddFooter: "&C&F", 
EvenHeader: "&L&P", EvenFooter: "&L&D&R&T", - FirstHeader: `&Tag Matrix &"-,` + model.ParsedModelRoot.Title + `"Bold&"-,Regular"Summary+000A&D`, + FirstHeader: `&Tag Matrix &"-,` + parsedModel.Title + `"Bold&"-,Regular"Summary+000A&D`, }) - checkErr(err) + if err != nil { + return err + } - err = excel.SetCellValue(sheetName, "A1", "Element") // TODO is "Element" the correct generic name when referencing assets, links, trust boudaries etc.? Eventually add separate column "type of element" like "technical asset" or "data asset"? - sortedTagsAvailable := model.TagsActuallyUsed() + err = excel.SetCellValue(sheetName, "A1", "Element") // TODO is "Element" the correct generic name when referencing assets, links, trust boundaries etc.? Eventually add separate column "type of element" like "technical asset" or "data asset"? + sortedTagsAvailable := parsedModel.TagsActuallyUsed() sort.Strings(sortedTagsAvailable) axis := "" for i, tag := range sortedTagsAvailable { @@ -502,10 +522,16 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted } err = excel.SetColWidth(sheetName, "A", "A", 60) + if err != nil { + return err + } + if len(sortedTagsAvailable) > 0 { err = excel.SetColWidth(sheetName, "B", axis, 35) } - checkErr(err) + if err != nil { + return err + } // styleBlackCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#000000","size":12}}`) styleBlackCenter, err := excel.NewStyle(&excelize.Style{ @@ -535,20 +561,35 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted excelRow++ // as we have a header line if len(sortedTagsAvailable) > 0 { - for _, techAsset := range model.SortedTechnicalAssetsByTitle() { - writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, techAsset.Title, techAsset.Tags) + for _, techAsset := range sortedTechnicalAssetsByTitle(parsedModel) { + err := writeRow(excel, &excelRow, 
sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, techAsset.Title, techAsset.Tags) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } for _, commLink := range techAsset.CommunicationLinksSorted() { - writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, commLink.Title, commLink.Tags) + err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, commLink.Title, commLink.Tags) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } } } - for _, dataAsset := range model.SortedDataAssetsByTitle() { - writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, dataAsset.Title, dataAsset.Tags) + for _, dataAsset := range sortedDataAssetsByTitle(parsedModel) { + err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, dataAsset.Title, dataAsset.Tags) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } } - for _, trustBoundary := range model.SortedTrustBoundariesByTitle() { - writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, trustBoundary.Title, trustBoundary.Tags) + for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) { + err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, trustBoundary.Title, trustBoundary.Tags) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } } - for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() { - writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, sharedRuntime.Title, sharedRuntime.Tags) + for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) { + err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, sharedRuntime.Title, 
sharedRuntime.Tags) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } } } @@ -569,6 +610,9 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted Pattern: 1, }, }) + if err != nil { + return fmt.Errorf("unable to set cell style: %w", err) + } // styleHeadCenterBold, err := excel.NewStyle(`{"font":{"bold":true,"italic":false,"size":14,"color":"#000000"},"fill":{"type":"pattern","color":["#eeeeee"],"pattern":1},"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false}}`) styleHeadCenterBold, err := excel.NewStyle(&excelize.Style{ Alignment: &excelize.Alignment{ @@ -591,31 +635,63 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted if len(sortedTagsAvailable) > 0 { err = excel.SetCellStyle(sheetName, "B1", axis+"1", styleHeadCenter) } - checkErr(err) + if err != nil { + return fmt.Errorf("unable to set cell style: %w", err) + } excel.SetActiveSheet(sheetIndex) err = excel.SaveAs(filename) - checkErr(err) + if err != nil { + return fmt.Errorf("unable to save excel file: %w", err) + } + return nil } -func writeRow(excel *excelize.File, sheetName string, axis string, styleBlackLeftBold int, styleBlackCenter int, - sortedTags []string, assetTitle string, tagsUsed []string) { - excelRow++ - err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(excelRow), assetTitle) +func sortedTrustBoundariesByTitle(parsedModel *types.ParsedModel) []types.TrustBoundary { + boundaries := make([]types.TrustBoundary, 0) + for _, boundary := range parsedModel.TrustBoundaries { + boundaries = append(boundaries, boundary) + } + sort.Sort(types.ByTrustBoundaryTitleSort(boundaries)) + return boundaries +} + +func sortedDataAssetsByTitle(parsedModel *types.ParsedModel) []types.DataAsset { + assets := make([]types.DataAsset, 0) + for _, asset := range parsedModel.DataAssets { + assets = append(assets, asset) + } + sort.Sort(types.ByDataAssetTitleSort(assets)) + return assets +} + +func 
writeRow(excel *excelize.File, excelRow *int, sheetName string, axis string, styleBlackLeftBold int, styleBlackCenter int, + sortedTags []string, assetTitle string, tagsUsed []string) error { + *excelRow++ + err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(*excelRow), assetTitle) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } for i, tag := range sortedTags { - if model.Contains(tagsUsed, tag) { - err = excel.SetCellValue(sheetName, determineColumnLetter(i)+strconv.Itoa(excelRow), "X") + if contains(tagsUsed, tag) { + err = excel.SetCellValue(sheetName, determineColumnLetter(i)+strconv.Itoa(*excelRow), "X") + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } } } - err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "A"+strconv.Itoa(excelRow), styleBlackLeftBold) - err = excel.SetCellStyle(sheetName, "B"+strconv.Itoa(excelRow), axis+strconv.Itoa(excelRow), styleBlackCenter) - checkErr(err) + err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(*excelRow), "A"+strconv.Itoa(*excelRow), styleBlackLeftBold) + err = excel.SetCellStyle(sheetName, "B"+strconv.Itoa(*excelRow), axis+strconv.Itoa(*excelRow), styleBlackCenter) + if err != nil { + return fmt.Errorf("unable to write row: %w", err) + } + return nil } var alphabet = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"} func determineColumnLetter(i int) string { - // can only have 700 columns in excel that way, but that should be more than usable anyway ;)... otherwise think about your model... + // can only have 700 columns in Excel that way, but that should be more than usable anyway ;)... otherwise think about your model... 
i++ if i < 26 { return alphabet[i] diff --git a/pkg/report/generate.go b/pkg/report/generate.go new file mode 100644 index 00000000..d1c9ef44 --- /dev/null +++ b/pkg/report/generate.go @@ -0,0 +1,192 @@ +package report + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/model" +) + +type GenerateCommands struct { + DataFlowDiagram bool + DataAssetDiagram bool + RisksJSON bool + TechnicalAssetsJSON bool + StatsJSON bool + RisksExcel bool + TagsExcel bool + ReportPDF bool +} + +func (c *GenerateCommands) Defaults() *GenerateCommands { + *c = GenerateCommands{ + DataFlowDiagram: true, + DataAssetDiagram: true, + RisksJSON: true, + TechnicalAssetsJSON: true, + StatsJSON: true, + RisksExcel: true, + TagsExcel: true, + ReportPDF: true, + } + return c +} + +func Generate(config *common.Config, readResult *model.ReadResult, commands *GenerateCommands, progressReporter progressReporter) error { + generateDataFlowDiagram := commands.DataFlowDiagram + generateDataAssetsDiagram := commands.DataAssetDiagram + if commands.ReportPDF { // as the PDF report includes both diagrams + generateDataFlowDiagram = true + generateDataAssetsDiagram = true + } + + diagramDPI := config.DiagramDPI + if diagramDPI < common.MinGraphvizDPI { + diagramDPI = common.MinGraphvizDPI + } else if diagramDPI > common.MaxGraphvizDPI { + diagramDPI = common.MaxGraphvizDPI + } + // Data-flow Diagram rendering + if generateDataFlowDiagram { + gvFile := filepath.Join(config.OutputFolder, config.DataFlowDiagramFilenameDOT) + if !config.KeepDiagramSourceFiles { + tmpFileGV, err := os.CreateTemp(config.TempFolder, config.DataFlowDiagramFilenameDOT) + if err != nil { + return err + } + gvFile = tmpFileGV.Name() + defer func() { _ = os.Remove(gvFile) }() + } + dotFile, err := WriteDataFlowDiagramGraphvizDOT(readResult.ParsedModel, gvFile, diagramDPI, config.AddModelTitle, progressReporter) + if 
err != nil { + return fmt.Errorf("error while generating data flow diagram: %s", err) + } + + err = GenerateDataFlowDiagramGraphvizImage(dotFile, config.OutputFolder, + config.TempFolder, config.BinFolder, config.DataFlowDiagramFilenamePNG, progressReporter) + if err != nil { + progressReporter.Warn(err) + } + } + // Data Asset Diagram rendering + if generateDataAssetsDiagram { + gvFile := filepath.Join(config.OutputFolder, config.DataAssetDiagramFilenameDOT) + if !config.KeepDiagramSourceFiles { + tmpFile, err := os.CreateTemp(config.TempFolder, config.DataAssetDiagramFilenameDOT) + if err != nil { + return err + } + gvFile = tmpFile.Name() + defer func() { _ = os.Remove(gvFile) }() + } + dotFile, err := WriteDataAssetDiagramGraphvizDOT(readResult.ParsedModel, gvFile, diagramDPI, progressReporter) + if err != nil { + return fmt.Errorf("error while generating data asset diagram: %s", err) + } + err = GenerateDataAssetDiagramGraphvizImage(dotFile, config.OutputFolder, + config.TempFolder, config.BinFolder, config.DataAssetDiagramFilenamePNG, progressReporter) + if err != nil { + progressReporter.Warn(err) + } + } + + // risks as risks json + if commands.RisksJSON { + progressReporter.Info("Writing risks json") + err := WriteRisksJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonRisksFilename)) + if err != nil { + return fmt.Errorf("error while writing risks json: %s", err) + } + } + + // technical assets json + if commands.TechnicalAssetsJSON { + progressReporter.Info("Writing technical assets json") + err := WriteTechnicalAssetsJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonTechnicalAssetsFilename)) + if err != nil { + return fmt.Errorf("error while writing technical assets json: %s", err) + } + } + + // risks as risks json + if commands.StatsJSON { + progressReporter.Info("Writing stats json") + err := WriteStatsJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonStatsFilename)) + if err 
!= nil { + return fmt.Errorf("error while writing stats json: %s", err) + } + } + + // risks Excel + if commands.RisksExcel { + progressReporter.Info("Writing risks excel") + err := WriteRisksExcelToFile(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.ExcelRisksFilename)) + if err != nil { + return err + } + } + + // tags Excel + if commands.TagsExcel { + progressReporter.Info("Writing tags excel") + err := WriteTagsExcelToFile(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.ExcelTagsFilename)) + if err != nil { + return err + } + } + + if commands.ReportPDF { + // hash the YAML input file + f, err := os.Open(config.InputFile) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + hasher := sha256.New() + if _, err := io.Copy(hasher, f); err != nil { + return err + } + modelHash := hex.EncodeToString(hasher.Sum(nil)) + // report PDF + progressReporter.Info("Writing report pdf") + + pdfReporter := pdfReporter{} + err = pdfReporter.WriteReportPDF(filepath.Join(config.OutputFolder, config.ReportFilename), + filepath.Join(config.AppFolder, config.TemplateFilename), + filepath.Join(config.OutputFolder, config.DataFlowDiagramFilenamePNG), + filepath.Join(config.OutputFolder, config.DataAssetDiagramFilenamePNG), + config.InputFile, + config.SkipRiskRules, + config.BuildTimestamp, + modelHash, + readResult.IntroTextRAA, + readResult.CustomRiskRules, + config.TempFolder, + readResult.ParsedModel) + if err != nil { + return err + } + } + + return nil +} + +type progressReporter interface { + Info(a ...any) + Warn(a ...any) + Error(a ...any) +} + +func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} diff --git a/pkg/report/graphviz.go b/pkg/report/graphviz.go new file mode 100644 index 00000000..7ffc2d18 --- /dev/null +++ b/pkg/report/graphviz.go @@ -0,0 +1,603 @@ +package report + +import ( + "errors" + "fmt" + "hash/fnv" + "os" + "os/exec" + 
"path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/threagile/threagile/pkg/security/types" +) + +func WriteDataFlowDiagramGraphvizDOT(parsedModel *types.ParsedModel, + diagramFilenameDOT string, dpi int, addModelTitle bool, + progressReporter progressReporter) (*os.File, error) { + progressReporter.Info("Writing data flow diagram input") + + var dotContent strings.Builder + dotContent.WriteString("digraph generatedModel { concentrate=false \n") + + // Metadata init =============================================================================== + tweaks := "" + if parsedModel.DiagramTweakNodesep > 0 { + tweaks += "\n nodesep=\"" + strconv.Itoa(parsedModel.DiagramTweakNodesep) + "\"" + } + if parsedModel.DiagramTweakRanksep > 0 { + tweaks += "\n ranksep=\"" + strconv.Itoa(parsedModel.DiagramTweakRanksep) + "\"" + } + suppressBidirectionalArrows := true + drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks := true + splines := "ortho" + if len(parsedModel.DiagramTweakEdgeLayout) > 0 { + switch parsedModel.DiagramTweakEdgeLayout { + case "spline": + splines = "spline" + drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false + case "polyline": + splines = "polyline" + drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false + case "ortho": + splines = "ortho" + suppressBidirectionalArrows = true + case "curved": + splines = "curved" + drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false + case "false": + splines = "false" + drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false + default: + return nil, fmt.Errorf("unknown value for diagram_tweak_suppress_edge_labels (spline, polyline, ortho, curved, false): %s", parsedModel.DiagramTweakEdgeLayout) + } + } + rankdir := "TB" + if parsedModel.DiagramTweakLayoutLeftToRight { + rankdir = "LR" + } + modelTitle := "" + if addModelTitle { + modelTitle = `label="` + parsedModel.Title + `"` + } + dotContent.WriteString(` graph [ ` + 
modelTitle + ` + labelloc=t + fontname="Verdana" + fontsize=40 + outputorder="nodesfirst" + dpi=` + strconv.Itoa(dpi) + ` + splines=` + splines + ` + rankdir="` + rankdir + `" +` + tweaks + ` + ]; + node [ + fontname="Verdana" + fontsize="20" + ]; + edge [ + shape="none" + fontname="Verdana" + fontsize="18" + ]; +`) + + // Trust Boundaries =============================================================================== + var subgraphSnippetsById = make(map[string]string) + // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order + // range over them in sorted (hence re-producible) way: + keys := make([]string, 0) + for k := range parsedModel.TrustBoundaries { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + trustBoundary := parsedModel.TrustBoundaries[key] + var snippet strings.Builder + if len(trustBoundary.TechnicalAssetsInside) > 0 || len(trustBoundary.TrustBoundariesNested) > 0 { + if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks { + // see https://stackoverflow.com/questions/17247455/how-do-i-add-extra-space-between-clusters?noredirect=1&lq=1 + snippet.WriteString("\n subgraph cluster_space_boundary_for_layout_only_1" + hash(trustBoundary.Id) + " {\n") + snippet.WriteString(` graph [ + dpi=` + strconv.Itoa(dpi) + ` + label=<
> + fontsize="21" + style="invis" + color="green" + fontcolor="green" + margin="50.0" + penwidth="6.5" + outputorder="nodesfirst" + ];`) + } + snippet.WriteString("\n subgraph cluster_" + hash(trustBoundary.Id) + " {\n") + color, fontColor, bgColor, style, fontname := RgbHexColorTwilight(), RgbHexColorTwilight() /*"#550E0C"*/, "#FAFAFA", "dashed", "Verdana" + penWidth := 4.5 + if len(trustBoundary.TrustBoundariesNested) > 0 { + //color, fontColor, style, fontname = Blue, Blue, "dashed", "Verdana" + penWidth = 5.5 + } + if len(trustBoundary.ParentTrustBoundaryID(parsedModel)) > 0 { + bgColor = "#F1F1F1" + } + if trustBoundary.Type == types.NetworkPolicyNamespaceIsolation { + fontColor, bgColor = "#222222", "#DFF4FF" + } + if trustBoundary.Type == types.ExecutionEnvironment { + fontColor, bgColor, style = "#555555", "#FFFFF0", "dotted" + } + snippet.WriteString(` graph [ + dpi=` + strconv.Itoa(dpi) + ` + label=<
` + trustBoundary.Title + ` (` + trustBoundary.Type.String() + `)
> + fontsize="21" + style="` + style + `" + color="` + color + `" + bgcolor="` + bgColor + `" + fontcolor="` + fontColor + `" + fontname="` + fontname + `" + penwidth="` + fmt.Sprintf("%f", penWidth) + `" + forcelabels=true + outputorder="nodesfirst" + margin="50.0" + ];`) + snippet.WriteString("\n") + keys := trustBoundary.TechnicalAssetsInside + sort.Strings(keys) + for _, technicalAssetInside := range keys { + //log.Println("About to add technical asset link to trust boundary: ", technicalAssetInside) + technicalAsset := parsedModel.TechnicalAssets[technicalAssetInside] + snippet.WriteString(hash(technicalAsset.Id)) + snippet.WriteString(";\n") + } + keys = trustBoundary.TrustBoundariesNested + sort.Strings(keys) + for _, trustBoundaryNested := range keys { + //log.Println("About to add nested trust boundary to trust boundary: ", trustBoundaryNested) + trustBoundaryNested := parsedModel.TrustBoundaries[trustBoundaryNested] + snippet.WriteString("LINK-NEEDS-REPLACED-BY-cluster_" + hash(trustBoundaryNested.Id)) + snippet.WriteString(";\n") + } + snippet.WriteString(" }\n\n") + if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks { + snippet.WriteString(" }\n\n") + } + } + subgraphSnippetsById[hash(trustBoundary.Id)] = snippet.String() + } + // here replace links and remove from map after replacement (i.e. 
move snippet into nested) + for i := range subgraphSnippetsById { + re := regexp.MustCompile(`LINK-NEEDS-REPLACED-BY-cluster_([0-9]*);`) + for { + matches := re.FindStringSubmatch(subgraphSnippetsById[i]) + if len(matches) > 0 { + embeddedSnippet := " //nested:" + subgraphSnippetsById[matches[1]] + subgraphSnippetsById[i] = strings.ReplaceAll(subgraphSnippetsById[i], matches[0], embeddedSnippet) + subgraphSnippetsById[matches[1]] = "" // to something like remove it + } else { + break + } + } + } + // now write them all + keys = make([]string, 0) + for k := range subgraphSnippetsById { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + snippet := subgraphSnippetsById[key] + dotContent.WriteString(snippet) + } + + // Technical Assets =============================================================================== + // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order + // range over them in sorted (hence re-producible) way: + // Convert map to slice of values: + var techAssets []types.TechnicalAsset + for _, techAsset := range parsedModel.TechnicalAssets { + techAssets = append(techAssets, techAsset) + } + sort.Sort(types.ByOrderAndIdSort(techAssets)) + for _, technicalAsset := range techAssets { + dotContent.WriteString(makeTechAssetNode(parsedModel, technicalAsset, false)) + dotContent.WriteString("\n") + } + + // Data Flows (Technical Communication Links) =============================================================================== + for _, technicalAsset := range techAssets { + for _, dataFlow := range technicalAsset.CommunicationLinks { + sourceId := technicalAsset.Id + targetId := dataFlow.TargetId + //log.Println("About to add link from", sourceId, "to", targetId, "with id", dataFlow.Id) + var arrowStyle, arrowColor, readOrWriteHead, readOrWriteTail string + if dataFlow.Readonly { + readOrWriteHead = "empty" + readOrWriteTail = "odot" + } 
else { + readOrWriteHead = "normal" + readOrWriteTail = "dot" + } + dir := "forward" + if dataFlow.IsBidirectional() { + if !suppressBidirectionalArrows { // as it does not work as bug in graphviz with ortho: https://gitlab.com/graphviz/graphviz/issues/144 + dir = "both" + } + } + arrowStyle = ` style="` + determineArrowLineStyle(dataFlow) + `" penwidth="` + determineArrowPenWidth(dataFlow, parsedModel) + `" arrowtail="` + readOrWriteTail + `" arrowhead="` + readOrWriteHead + `" dir="` + dir + `" arrowsize="2.0" ` + arrowColor = ` color="` + determineArrowColor(dataFlow, parsedModel) + `"` + tweaks := "" + if dataFlow.DiagramTweakWeight > 0 { + tweaks += " weight=\"" + strconv.Itoa(dataFlow.DiagramTweakWeight) + "\" " + } + + dotContent.WriteString("\n") + dotContent.WriteString(" " + hash(sourceId) + " -> " + hash(targetId) + + ` [` + arrowColor + ` ` + arrowStyle + tweaks + ` constraint=` + strconv.FormatBool(dataFlow.DiagramTweakConstraint) + ` `) + if !parsedModel.DiagramTweakSuppressEdgeLabels { + dotContent.WriteString(` xlabel="` + encode(dataFlow.Protocol.String()) + `" fontcolor="` + determineLabelColor(dataFlow, parsedModel) + `" `) + } + dotContent.WriteString(" ];\n") + } + } + + diagramInvisibleConnectionsTweaks, err := makeDiagramInvisibleConnectionsTweaks(parsedModel) + if err != nil { + return nil, fmt.Errorf("error while making diagram invisible connections tweaks: %s", err) + } + dotContent.WriteString(diagramInvisibleConnectionsTweaks) + + diagramSameRankNodeTweaks, err := makeDiagramSameRankNodeTweaks(parsedModel) + if err != nil { + return nil, fmt.Errorf("error while making diagram same-rank node tweaks: %s", err) + } + dotContent.WriteString(diagramSameRankNodeTweaks) + + dotContent.WriteString("}") + + //fmt.Println(dotContent.String()) + + // Write the DOT file + file, err := os.Create(filepath.Clean(diagramFilenameDOT)) + if err != nil { + return nil, fmt.Errorf("Error creating %s: %v", diagramFilenameDOT, err) + } + defer func() { _ = 
file.Close() }() + _, err = fmt.Fprintln(file, dotContent.String()) + if err != nil { + return nil, fmt.Errorf("Error writing %s: %v", diagramFilenameDOT, err) + } + return file, nil +} + +func GenerateDataFlowDiagramGraphvizImage(dotFile *os.File, targetDir string, + tempFolder, binFolder, dataFlowDiagramFilenamePNG string, progressReporter progressReporter) error { + progressReporter.Info("Rendering data flow diagram input") + // tmp files + tmpFileDOT, err := os.CreateTemp(tempFolder, "diagram-*-.gv") + if err != nil { + return fmt.Errorf("Error creating temp file: %v", err) + } + defer func() { _ = os.Remove(tmpFileDOT.Name()) }() + + tmpFilePNG, err := os.CreateTemp(tempFolder, "diagram-*-.png") + if err != nil { + return fmt.Errorf("Error creating temp file: %v", err) + } + defer func() { _ = os.Remove(tmpFilePNG.Name()) }() + + // copy into tmp file as input + inputDOT, err := os.ReadFile(dotFile.Name()) + if err != nil { + return fmt.Errorf("Error reading %s: %v", dotFile.Name(), err) + } + err = os.WriteFile(tmpFileDOT.Name(), inputDOT, 0600) + if err != nil { + return fmt.Errorf("Error creating %s: %v", tmpFileDOT.Name(), err) + } + + // exec + + cmd := exec.Command("dot", "-Tpng", tmpFileDOT.Name(), "-o", tmpFilePNG.Name()) // #nosec G204 + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return errors.New("graph rendering call failed with error: " + err.Error()) + } + // copy into resulting file + inputPNG, err := os.ReadFile(tmpFilePNG.Name()) + if err != nil { + return fmt.Errorf("Error copying into resulting file %s: %v", tmpFilePNG.Name(), err) + } + err = os.WriteFile(filepath.Join(targetDir, dataFlowDiagramFilenamePNG), inputPNG, 0600) + if err != nil { + return fmt.Errorf("Error creating %s: %v", filepath.Join(targetDir, dataFlowDiagramFilenamePNG), err) + } + return nil +} + +func makeDiagramSameRankNodeTweaks(parsedModel *types.ParsedModel) (string, error) { + // see 
https://stackoverflow.com/questions/25734244/how-do-i-place-nodes-on-the-same-level-in-dot + tweak := "" + if len(parsedModel.DiagramTweakSameRankAssets) > 0 { + for _, sameRank := range parsedModel.DiagramTweakSameRankAssets { + assetIDs := strings.Split(sameRank, ":") + if len(assetIDs) > 0 { + tweak += "{ rank=same; " + for _, id := range assetIDs { + err := parsedModel.CheckTechnicalAssetExists(id, "diagram tweak same-rank", true) + if err != nil { + return "", fmt.Errorf("error while checking technical asset existence: %s", err) + } + if len(parsedModel.TechnicalAssets[id].GetTrustBoundaryId(parsedModel)) > 0 { + return "", fmt.Errorf("technical assets (referenced in same rank diagram tweak) are inside trust boundaries: " + + fmt.Sprintf("%v", parsedModel.DiagramTweakSameRankAssets)) + } + tweak += " " + hash(id) + "; " + } + tweak += " }" + } + } + } + return tweak, nil +} + +func makeDiagramInvisibleConnectionsTweaks(parsedModel *types.ParsedModel) (string, error) { + // see https://stackoverflow.com/questions/2476575/how-to-control-node-placement-in-graphviz-i-e-avoid-edge-crossings + tweak := "" + if len(parsedModel.DiagramTweakInvisibleConnectionsBetweenAssets) > 0 { + for _, invisibleConnections := range parsedModel.DiagramTweakInvisibleConnectionsBetweenAssets { + assetIDs := strings.Split(invisibleConnections, ":") + if len(assetIDs) == 2 { + err := parsedModel.CheckTechnicalAssetExists(assetIDs[0], "diagram tweak connections", true) + if err != nil { + return "", fmt.Errorf("error while checking technical asset existence: %s", err) + } + err = parsedModel.CheckTechnicalAssetExists(assetIDs[1], "diagram tweak connections", true) + if err != nil { + return "", fmt.Errorf("error while checking technical asset existence: %s", err) + } + + tweak += "\n" + hash(assetIDs[0]) + " -> " + hash(assetIDs[1]) + " [style=invis]; \n" + } + } + } + return tweak, nil +} + +func WriteDataAssetDiagramGraphvizDOT(parsedModel *types.ParsedModel, diagramFilenameDOT string, 
dpi int, + progressReporter progressReporter) (*os.File, error) { + progressReporter.Info("Writing data asset diagram input") + + var dotContent strings.Builder + dotContent.WriteString("digraph generatedModel { concentrate=true \n") + + // Metadata init =============================================================================== + dotContent.WriteString(` graph [ + dpi=` + strconv.Itoa(dpi) + ` + fontname="Verdana" + labelloc="c" + fontsize="20" + splines=false + rankdir="LR" + nodesep=1.0 + ranksep=3.0 + outputorder="nodesfirst" + ]; + node [ + fontcolor="white" + fontname="Verdana" + fontsize="20" + ]; + edge [ + shape="none" + fontname="Verdana" + fontsize="18" + ]; +`) + + // Technical Assets =============================================================================== + techAssets := make([]types.TechnicalAsset, 0) + for _, techAsset := range parsedModel.TechnicalAssets { + techAssets = append(techAssets, techAsset) + } + sort.Sort(types.ByOrderAndIdSort(techAssets)) + for _, technicalAsset := range techAssets { + if len(technicalAsset.DataAssetsStored) > 0 || len(technicalAsset.DataAssetsProcessed) > 0 { + dotContent.WriteString(makeTechAssetNode(parsedModel, technicalAsset, true)) + dotContent.WriteString("\n") + } + } + + // Data Assets =============================================================================== + dataAssets := make([]types.DataAsset, 0) + for _, dataAsset := range parsedModel.DataAssets { + dataAssets = append(dataAssets, dataAsset) + } + + types.SortByDataAssetDataBreachProbabilityAndTitle(parsedModel, dataAssets) + for _, dataAsset := range dataAssets { + dotContent.WriteString(makeDataAssetNode(parsedModel, dataAsset)) + dotContent.WriteString("\n") + } + + // Data Asset to Tech Asset links =============================================================================== + for _, technicalAsset := range techAssets { + for _, sourceId := range technicalAsset.DataAssetsStored { + targetId := technicalAsset.Id + 
dotContent.WriteString("\n") + dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) + + ` [ color="blue" style="solid" ];`) + dotContent.WriteString("\n") + } + for _, sourceId := range technicalAsset.DataAssetsProcessed { + if !contains(technicalAsset.DataAssetsStored, sourceId) { // here only if not already drawn above + targetId := technicalAsset.Id + dotContent.WriteString("\n") + dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) + + ` [ color="#666666" style="dashed" ];`) + dotContent.WriteString("\n") + } + } + } + + dotContent.WriteString("}") + + // Write the DOT file + file, err := os.Create(filepath.Clean(diagramFilenameDOT)) + if err != nil { + return nil, fmt.Errorf("Error creating %s: %v", diagramFilenameDOT, err) + } + defer func() { _ = file.Close() }() + _, err = fmt.Fprintln(file, dotContent.String()) + if err != nil { + return nil, fmt.Errorf("Error writing %s: %v", diagramFilenameDOT, err) + } + return file, nil +} + +func makeDataAssetNode(parsedModel *types.ParsedModel, dataAsset types.DataAsset) string { + var color string + switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) { + case types.Probable: + color = RgbHexColorHighRisk() + case types.Possible: + color = RgbHexColorMediumRisk() + case types.Improbable: + color = RgbHexColorLowRisk() + default: + color = "#444444" // since black is too dark here as fill color + } + if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) { + color = "#444444" // since black is too dark here as fill color + } + return " " + hash(dataAsset.Id) + ` [ label=<` + encode(dataAsset.Title) + `> penwidth="3.0" style="filled" fillcolor="` + color + `" color="` + color + "\"\n ]; " +} + +func makeTechAssetNode(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, simplified bool) string { + if simplified { + color := RgbHexColorOutOfScope() + if !technicalAsset.OutOfScope { + generatedRisks := technicalAsset.GeneratedRisks(parsedModel) + switch 
types.HighestSeverityStillAtRisk(parsedModel, generatedRisks) { + case types.CriticalSeverity: + color = RgbHexColorCriticalRisk() + case types.HighSeverity: + color = RgbHexColorHighRisk() + case types.ElevatedSeverity: + color = RgbHexColorElevatedRisk() + case types.MediumSeverity: + color = RgbHexColorMediumRisk() + case types.LowSeverity: + color = RgbHexColorLowRisk() + default: + color = "#444444" // since black is too dark here as fill color + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, generatedRisks)) == 0 { + color = "#444444" // since black is too dark here as fill color + } + } + return " " + hash(technicalAsset.Id) + ` [ shape="box" style="filled" fillcolor="` + color + `" + label=<` + encode(technicalAsset.Title) + `> penwidth="3.0" color="` + color + `" ]; + ` + } else { + var shape, title string + var lineBreak = "" + switch technicalAsset.Type { + case types.ExternalEntity: + shape = "box" + title = technicalAsset.Title + case types.Process: + shape = "ellipse" + title = technicalAsset.Title + case types.Datastore: + shape = "cylinder" + title = technicalAsset.Title + if technicalAsset.Redundant { + lineBreak = "
" + } + } + + if technicalAsset.UsedAsClientByHuman { + shape = "octagon" + } + + // RAA = Relative Attacker Attractiveness + raa := technicalAsset.RAA + var attackerAttractivenessLabel string + if technicalAsset.OutOfScope { + attackerAttractivenessLabel = "RAA: out of scope" + } else { + attackerAttractivenessLabel = "RAA: " + fmt.Sprintf("%.0f", raa) + " %" + } + + compartmentBorder := "0" + if technicalAsset.MultiTenant { + compartmentBorder = "1" + } + + return " " + hash(technicalAsset.Id) + ` [ + label=<
` + lineBreak + technicalAsset.Technology.String() + `
` + technicalAsset.Size.String() + `
` + encode(title) + `
` + attackerAttractivenessLabel + `
> + shape=` + shape + ` style="` + determineShapeBorderLineStyle(technicalAsset) + `,` + determineShapeStyle(technicalAsset) + `" penwidth="` + determineShapeBorderPenWidth(technicalAsset, parsedModel) + `" fillcolor="` + determineShapeFillColor(technicalAsset, parsedModel) + `" + peripheries=` + strconv.Itoa(determineShapePeripheries(technicalAsset)) + ` + color="` + determineShapeBorderColor(technicalAsset, parsedModel) + "\"\n ]; " + } +} + +func GenerateDataAssetDiagramGraphvizImage(dotFile *os.File, targetDir string, + tempFolder, binFolder, dataAssetDiagramFilenamePNG string, progressReporter progressReporter) error { // TODO dedupe with other render...() method here + progressReporter.Info("Rendering data asset diagram input") + // tmp files + tmpFileDOT, err := os.CreateTemp(tempFolder, "diagram-*-.gv") + if err != nil { + return fmt.Errorf("Error creating temp file: %v", err) + } + defer func() { _ = os.Remove(tmpFileDOT.Name()) }() + + tmpFilePNG, err := os.CreateTemp(tempFolder, "diagram-*-.png") + if err != nil { + return fmt.Errorf("Error creating temp file: %v", err) + } + defer func() { _ = os.Remove(tmpFilePNG.Name()) }() + + // copy into tmp file as input + inputDOT, err := os.ReadFile(dotFile.Name()) + if err != nil { + return fmt.Errorf("Error reading %s: %v", dotFile.Name(), err) + } + err = os.WriteFile(tmpFileDOT.Name(), inputDOT, 0600) + if err != nil { + return fmt.Errorf("Error creating %s: %v", tmpFileDOT.Name(), err) + } + + // exec + cmd := exec.Command("dot", "-Tpng", tmpFileDOT.Name(), "-o", tmpFilePNG.Name()) // #nosec G204 + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return errors.New("graph rendering call failed with error: " + err.Error()) + } + // copy into resulting file + inputPNG, err := os.ReadFile(tmpFilePNG.Name()) + if err != nil { + return fmt.Errorf("Error copying into resulting file %s: %v", tmpFilePNG.Name(), err) + } + err = os.WriteFile(filepath.Join(targetDir, 
dataAssetDiagramFilenamePNG), inputPNG, 0600) + if err != nil { + return fmt.Errorf("Error creating %s: %v", filepath.Join(targetDir, dataAssetDiagramFilenamePNG), err) + } + return nil +} + +func hash(s string) string { + h := fnv.New32a() + _, _ = h.Write([]byte(s)) + return fmt.Sprintf("%v", h.Sum32()) +} + +func encode(value string) string { + return strings.ReplaceAll(value, "&", "&") +} diff --git a/pkg/report/json.go b/pkg/report/json.go new file mode 100644 index 00000000..8a27d655 --- /dev/null +++ b/pkg/report/json.go @@ -0,0 +1,56 @@ +package report + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/threagile/threagile/pkg/security/types" +) + +func WriteRisksJSON(parsedModel *types.ParsedModel, filename string) error { + /* + remainingRisks := make([]model.Risk, 0) + for _, category := range model.SortedRiskCategories() { + risks := model.SortedRisksOfCategory(category) + for _, risk := range model.ReduceToOnlyStillAtRisk(risks) { + remainingRisks = append(remainingRisks, risk) + } + } + */ + jsonBytes, err := json.Marshal(types.AllRisks(parsedModel)) + if err != nil { + return fmt.Errorf("failed to marshal risks to JSON: %w", err) + } + err = os.WriteFile(filename, jsonBytes, 0600) + if err != nil { + return fmt.Errorf("failed to write risks to JSON file: %w", err) + } + return nil +} + +// TODO: also a "data assets" json? 
+ +func WriteTechnicalAssetsJSON(parsedModel *types.ParsedModel, filename string) error { + jsonBytes, err := json.Marshal(parsedModel.TechnicalAssets) + if err != nil { + return fmt.Errorf("failed to marshal technical assets to JSON: %w", err) + } + err = os.WriteFile(filename, jsonBytes, 0600) + if err != nil { + return fmt.Errorf("failed to write technical assets to JSON file: %w", err) + } + return nil +} + +func WriteStatsJSON(parsedModel *types.ParsedModel, filename string) error { + jsonBytes, err := json.Marshal(types.OverallRiskStatistics(parsedModel)) + if err != nil { + return fmt.Errorf("failed to marshal stats to JSON: %w", err) + } + err = os.WriteFile(filename, jsonBytes, 0600) + if err != nil { + return fmt.Errorf("failed to write stats to JSON file: %w", err) + } + return nil +} diff --git a/pkg/report/report.go b/pkg/report/report.go new file mode 100644 index 00000000..907056f2 --- /dev/null +++ b/pkg/report/report.go @@ -0,0 +1,4601 @@ +package report + +import ( + "fmt" + "image" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/jung-kurt/gofpdf" + "github.com/jung-kurt/gofpdf/contrib/gofpdi" + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/model" + "github.com/threagile/threagile/pkg/security/risks" + "github.com/threagile/threagile/pkg/security/types" + "github.com/wcharczuk/go-chart" + "github.com/wcharczuk/go-chart/drawing" +) + +const fontSizeHeadline, fontSizeHeadlineSmall, fontSizeBody, fontSizeSmall, fontSizeVerySmall = 20, 16, 12, 9, 7 +const /*dataFlowDiagramFullscreen,*/ allowedPdfLandscapePages, embedDiagramLegendPage = /*false,*/ true, false + +type pdfReporter struct { + isLandscapePage bool + pdf *gofpdf.Fpdf + coverTemplateId int + contentTemplateId int + diagramLegendTemplateId int + pageNo int + linkCounter int + tocLinkIdByAssetId map[string]int + homeLink int + currentChapterTitleBreadcrumb string +} + +func (r *pdfReporter) 
initReport() { + r.pdf = nil + r.isLandscapePage = false + r.pageNo = 0 + r.linkCounter = 0 + r.homeLink = 0 + r.currentChapterTitleBreadcrumb = "" + r.tocLinkIdByAssetId = make(map[string]int) +} + +func (r *pdfReporter) WriteReportPDF(reportFilename string, + templateFilename string, + dataFlowDiagramFilenamePNG string, + dataAssetDiagramFilenamePNG string, + modelFilename string, + skipRiskRules string, + buildTimestamp string, + modelHash string, + introTextRAA string, + customRiskRules map[string]*model.CustomRisk, + tempFolder string, + model *types.ParsedModel) error { + defer func() { + value := recover() + if value != nil { + fmt.Printf("error creating PDF report: %v", value) + } + }() + + r.initReport() + r.createPdfAndInitMetadata(model) + r.parseBackgroundTemplate(templateFilename) + r.createCover(model) + r.createTableOfContents(model) + err := r.createManagementSummary(model, tempFolder) + if err != nil { + return fmt.Errorf("error creating management summary: %w", err) + } + r.createImpactInitialRisks(model) + err = r.createRiskMitigationStatus(model, tempFolder) + if err != nil { + return fmt.Errorf("error creating risk mitigation status: %w", err) + } + r.createImpactRemainingRisks(model) + err = r.createTargetDescription(model, filepath.Dir(modelFilename)) + if err != nil { + return fmt.Errorf("error creating target description: %w", err) + } + r.embedDataFlowDiagram(dataFlowDiagramFilenamePNG, tempFolder) + r.createSecurityRequirements(model) + r.createAbuseCases(model) + r.createTagListing(model) + r.createSTRIDE(model) + r.createAssignmentByFunction(model) + r.createRAA(model, introTextRAA) + r.embedDataRiskMapping(dataAssetDiagramFilenamePNG, tempFolder) + //createDataRiskQuickWins() + r.createOutOfScopeAssets(model) + r.createModelFailures(model) + r.createQuestions(model) + r.createRiskCategories(model) + r.createTechnicalAssets(model) + r.createDataAssets(model) + r.createTrustBoundaries(model) + r.createSharedRuntimes(model) + 
r.createRiskRulesChecked(model, modelFilename, skipRiskRules, buildTimestamp, modelHash, customRiskRules) + r.createDisclaimer(model) + err = r.writeReportToFile(reportFilename) + if err != nil { + return fmt.Errorf("error writing report to file: %w", err) + } + return nil +} + +func (r *pdfReporter) createPdfAndInitMetadata(model *types.ParsedModel) { + r.pdf = gofpdf.New("P", "mm", "A4", "") + r.pdf.SetCreator(model.Author.Homepage, true) + r.pdf.SetAuthor(model.Author.Name, true) + r.pdf.SetTitle("Threat Model Report: "+model.Title, true) + r.pdf.SetSubject("Threat Model Report: "+model.Title, true) + // r.pdf.SetPageBox("crop", 0, 0, 100, 010) + r.pdf.SetHeaderFunc(func() { + if r.isLandscapePage { + return + } + + gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300) + r.pdf.SetTopMargin(35) + }) + r.pdf.SetFooterFunc(func() { + r.addBreadcrumb(model) + r.pdf.SetFont("Helvetica", "", 10) + r.pdf.SetTextColor(127, 127, 127) + r.pdf.Text(8.6, 284, "Threat Model Report via Threagile") //: "+parsedModel.Title) + r.pdf.Link(8.4, 281, 54.6, 4, r.homeLink) + r.pageNo++ + text := "Page " + strconv.Itoa(r.pageNo) + if r.pageNo < 10 { + text = " " + text + } else if r.pageNo < 100 { + text = " " + text + } + if r.pageNo > 1 { + r.pdf.Text(186, 284, text) + } + }) + r.linkCounter = 1 // link counting starts at 1 via r.pdf.AddLink +} + +func (r *pdfReporter) addBreadcrumb(parsedModel *types.ParsedModel) { + if len(r.currentChapterTitleBreadcrumb) > 0 { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetFont("Helvetica", "", 10) + r.pdf.SetTextColor(127, 127, 127) + r.pdf.Text(46.7, 24.5, uni(r.currentChapterTitleBreadcrumb+" - "+parsedModel.Title)) + } +} + +func (r *pdfReporter) parseBackgroundTemplate(templateFilename string) { + /* + imageBox, err := rice.FindBox("template") + checkErr(err) + file, err := os.CreateTemp("", "background-*-.r.pdf") + checkErr(err) + defer os.Remove(file.Name()) + backgroundBytes := 
imageBox.MustBytes("background.r.pdf") + err = os.WriteFile(file.Name(), backgroundBytes, 0644) + checkErr(err) + */ + r.coverTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 1, "/MediaBox") + r.contentTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 2, "/MediaBox") + r.diagramLegendTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 3, "/MediaBox") +} + +func (r *pdfReporter) createCover(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.AddPage() + gofpdi.UseImportedTemplate(r.pdf, r.coverTemplateId, 0, 0, 0, 300) + r.pdf.SetFont("Helvetica", "B", 28) + r.pdf.SetTextColor(0, 0, 0) + r.pdf.Text(40, 110, "Threat Model Report") + r.pdf.Text(40, 125, uni(parsedModel.Title)) + r.pdf.SetFont("Helvetica", "", 12) + reportDate := parsedModel.Date + if reportDate.IsZero() { + reportDate = types.Date{Time: time.Now()} + } + r.pdf.Text(40.7, 145, reportDate.Format("2 January 2006")) + r.pdf.Text(40.7, 153, uni(parsedModel.Author.Name)) + r.pdf.SetFont("Helvetica", "", 10) + r.pdf.SetTextColor(80, 80, 80) + r.pdf.Text(8.6, 275, parsedModel.Author.Homepage) + r.pdf.SetFont("Helvetica", "", 12) + r.pdf.SetTextColor(0, 0, 0) +} + +func (r *pdfReporter) createTableOfContents(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.AddPage() + r.currentChapterTitleBreadcrumb = "Table of Contents" + r.homeLink = r.pdf.AddLink() + r.defineLinkTarget("{home}") + gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300) + r.pdf.SetFont("Helvetica", "B", fontSizeHeadline) + r.pdf.Text(11, 40, "Table of Contents") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetY(46) + + r.pdf.SetLineWidth(0.25) + r.pdf.SetDrawColor(160, 160, 160) + r.pdf.SetDashPattern([]float64{0.5, 0.5}, 0) + + // =============== + + var y float64 = 50 + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Text(11, y, "Results Overview") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + y += 6 + 
r.pdf.Text(11, y, " "+"Management Summary") + r.pdf.Text(175, y, "{management-summary}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + risksStr := "Risks" + catStr := "Categories" + count, catCount := types.TotalRiskCount(parsedModel), len(parsedModel.GeneratedRisksByCategory) + if count == 1 { + risksStr = "Risk" + } + if catCount == 1 { + catStr = "Category" + } + y += 6 + r.pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Initial "+risksStr+" in "+strconv.Itoa(catCount)+" "+catStr) + r.pdf.Text(175, y, "{impact-analysis-initial-risks}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Risk Mitigation") + r.pdf.Text(175, y, "{risk-mitigation-status}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + risksStr = "Risks" + catStr = "Categories" + count, catCount = len(types.FilteredByStillAtRisk(parsedModel)), len(types.CategoriesOfOnlyRisksStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory)) + if count == 1 { + risksStr = "Risk" + } + if catCount == 1 { + catStr = "Category" + } + r.pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Remaining "+risksStr+" in "+strconv.Itoa(catCount)+" "+catStr) + r.pdf.Text(175, y, "{impact-analysis-remaining-risks}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Application Overview") + r.pdf.Text(175, y, "{target-overview}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Data-Flow Diagram") + r.pdf.Text(175, y, "{data-flow-diagram}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Security Requirements") + r.pdf.Text(175, y, "{security-requirements}") + 
r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Abuse Cases") + r.pdf.Text(175, y, "{abuse-cases}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Tag Listing") + r.pdf.Text(175, y, "{tag-listing}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"STRIDE Classification of Identified Risks") + r.pdf.Text(175, y, "{stride}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Assignment by Function") + r.pdf.Text(175, y, "{function-assignment}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"RAA Analysis") + r.pdf.Text(175, y, "{raa-analysis}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + r.pdf.Text(11, y, " "+"Data Mapping") + r.pdf.Text(175, y, "{data-risk-mapping}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + /* + y += 6 + assets := "assets" + count = len(model.SortedTechnicalAssetsByQuickWinsAndTitle()) + if count == 1 { + assets = "asset" + } + r.pdf.Text(11, y, " "+"Data Risk Quick Wins: "+strconv.Itoa(count)+" "+assets) + r.pdf.Text(175, y, "{data-risk-quick-wins}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + */ + + y += 6 + assets := "Assets" + count = len(parsedModel.OutOfScopeTechnicalAssets()) + if count == 1 { + assets = "Asset" + } + r.pdf.Text(11, y, " "+"Out-of-Scope Assets: "+strconv.Itoa(count)+" "+assets) + r.pdf.Text(175, y, "{out-of-scope-assets}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + modelFailures := 
types.FlattenRiskSlice(types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory)) + risksStr = "Risks" + count = len(modelFailures) + if count == 1 { + risksStr = "Risk" + } + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, modelFailures)) + if countStillAtRisk > 0 { + ColorModelFailure(r.pdf) + } + r.pdf.Text(11, y, " "+"Potential Model Failures: "+strconv.Itoa(countStillAtRisk)+" / "+strconv.Itoa(count)+" "+risksStr) + r.pdf.Text(175, y, "{model-failures}") + r.pdfColorBlack() + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + y += 6 + questions := "Questions" + count = len(parsedModel.Questions) + if count == 1 { + questions = "Question" + } + if questionsUnanswered(parsedModel) > 0 { + ColorModelFailure(r.pdf) + } + r.pdf.Text(11, y, " "+"Questions: "+strconv.Itoa(questionsUnanswered(parsedModel))+" / "+strconv.Itoa(count)+" "+questions) + r.pdf.Text(175, y, "{questions}") + r.pdfColorBlack() + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + + // =============== + + if len(parsedModel.GeneratedRisksByCategory) > 0 { + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + r.pdf.Text(11, y, "Risks by Vulnerability Category") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + y += 6 + r.pdf.Text(11, y, " "+"Identified Risks by Vulnerability Category") + r.pdf.Text(175, y, "{intro-risks-by-vulnerability-category}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + for _, category := range types.SortedRiskCategories(parsedModel) { + newRisksStr := types.SortedRisksOfCategory(parsedModel, category) + switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + 
case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 { + r.pdfColorBlack() + } + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk" + if len(newRisksStr) != 1 { + suffix += "s" + } + r.pdf.Text(11, y, " "+uni(category.Title)+": "+suffix) + r.pdf.Text(175, y, "{"+category.Id+"}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.tocLinkIdByAssetId[category.Id] = r.pdf.AddLink() + r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[category.Id]) + } + } + + // =============== + + if len(parsedModel.TechnicalAssets) > 0 { + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + r.pdf.Text(11, y, "Risks by Technical Asset") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + y += 6 + r.pdf.Text(11, y, " "+"Identified Risks by Technical Asset") + r.pdf.Text(175, y, "{intro-risks-by-technical-asset}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + for _, technicalAsset := range sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel) { + newRisksStr := technicalAsset.GeneratedRisks(parsedModel) + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk" + if len(newRisksStr) != 1 { + suffix += "s" + } + if technicalAsset.OutOfScope { + 
r.pdfColorOutOfScope() + suffix = "out-of-scope" + } else { + switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 { + r.pdfColorBlack() + } + } + r.pdf.Text(11, y, " "+uni(technicalAsset.Title)+": "+suffix) + r.pdf.Text(175, y, "{"+technicalAsset.Id+"}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.tocLinkIdByAssetId[technicalAsset.Id] = r.pdf.AddLink() + r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[technicalAsset.Id]) + } + } + + // =============== + + if len(parsedModel.DataAssets) > 0 { + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdfColorBlack() + r.pdf.Text(11, y, "Data Breach Probabilities by Data Asset") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + y += 6 + r.pdf.Text(11, y, " "+"Identified Data Breach Probabilities by Data Asset") + r.pdf.Text(175, y, "{intro-risks-by-data-asset}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + for _, dataAsset := range sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel) { + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + newRisksStr := dataAsset.IdentifiedDataBreachProbabilityRisks(parsedModel) + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk" + if len(newRisksStr) != 1 { + suffix += "s" + } + switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) { + case 
types.Probable: + ColorHighRisk(r.pdf) + case types.Possible: + ColorMediumRisk(r.pdf) + case types.Improbable: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) { + r.pdfColorBlack() + } + r.pdf.Text(11, y, " "+uni(dataAsset.Title)+": "+suffix) + r.pdf.Text(175, y, "{data:"+dataAsset.Id+"}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.tocLinkIdByAssetId[dataAsset.Id] = r.pdf.AddLink() + r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[dataAsset.Id]) + } + } + + // =============== + + if len(parsedModel.TrustBoundaries) > 0 { + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdfColorBlack() + r.pdf.Text(11, y, "Trust Boundaries") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + for _, key := range types.SortedKeysOfTrustBoundaries(parsedModel) { + trustBoundary := parsedModel.TrustBoundaries[key] + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + ColorTwilight(r.pdf) + if !trustBoundary.Type.IsNetworkBoundary() { + r.pdfColorLightGray() + } + r.pdf.Text(11, y, " "+uni(trustBoundary.Title)) + r.pdf.Text(175, y, "{boundary:"+trustBoundary.Id+"}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.tocLinkIdByAssetId[trustBoundary.Id] = r.pdf.AddLink() + r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[trustBoundary.Id]) + } + r.pdfColorBlack() + } + + // =============== + + if len(parsedModel.SharedRuntimes) > 0 { + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdfColorBlack() + r.pdf.Text(11, y, "Shared Runtime") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + for _, key := range types.SortedKeysOfSharedRuntime(parsedModel) { + sharedRuntime := parsedModel.SharedRuntimes[key] + y += 6 + if y > 
275 { + r.pageBreakInLists() + y = 40 + } + r.pdf.Text(11, y, " "+uni(sharedRuntime.Title)) + r.pdf.Text(175, y, "{runtime:"+sharedRuntime.Id+"}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.tocLinkIdByAssetId[sharedRuntime.Id] = r.pdf.AddLink() + r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[sharedRuntime.Id]) + } + } + + // =============== + + y += 6 + y += 6 + if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" + r.pageBreakInLists() + y = 40 + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Text(11, y, "About Threagile") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + r.pdf.Text(11, y, " "+"Risk Rules Checked by Threagile") + r.pdf.Text(175, y, "{risk-rules-checked}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + y += 6 + if y > 275 { + r.pageBreakInLists() + y = 40 + } + r.pdfColorDisclaimer() + r.pdf.Text(11, y, " "+"Disclaimer") + r.pdf.Text(175, y, "{disclaimer}") + r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) + r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink()) + r.pdfColorBlack() + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) + + // Now write all the sections/pages. Before we start writing, we use `RegisterAlias` to + // ensure that the alias written in the table of contents will be replaced + // by the current page number. 
--> See the "r.pdf.RegisterAlias()" calls during the PDF creation in this file +} + +func sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset { + assets := make([]types.TechnicalAsset, 0) + for _, asset := range parsedModel.TechnicalAssets { + assets = append(assets, asset) + } + types.SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk(assets, parsedModel) + return assets +} + +func sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel *types.ParsedModel) []types.DataAsset { + assets := make([]types.DataAsset, 0) + for _, asset := range parsedModel.DataAssets { + assets = append(assets, asset) + } + + types.SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk(parsedModel, assets) + return assets +} + +func (r *pdfReporter) defineLinkTarget(alias string) { + pageNumbStr := strconv.Itoa(r.pdf.PageNo()) + if len(pageNumbStr) == 1 { + pageNumbStr = " " + pageNumbStr + } else if len(pageNumbStr) == 2 { + pageNumbStr = " " + pageNumbStr + } + r.pdf.RegisterAlias(alias, pageNumbStr) + r.pdf.SetLink(r.linkCounter, 0, -1) + r.linkCounter++ +} + +func (r *pdfReporter) createDisclaimer(parsedModel *types.ParsedModel) { + r.pdf.AddPage() + r.currentChapterTitleBreadcrumb = "Disclaimer" + r.defineLinkTarget("{disclaimer}") + gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300) + r.pdfColorDisclaimer() + r.pdf.SetFont("Helvetica", "B", fontSizeHeadline) + r.pdf.Text(11, 40, "Disclaimer") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetY(46) + + var disclaimer strings.Builder + disclaimer.WriteString(parsedModel.Author.Name + " conducted this threat analysis using the open-source Threagile toolkit " + + "on the applications and systems that were modeled as of this report's date. " + + "Information security threats are continually changing, with new " + + "vulnerabilities discovered on a daily basis, and no application can ever be 100% secure no matter how much " + + "threat modeling is conducted. 
It is recommended to execute threat modeling and also penetration testing on a regular basis " + + "(for example yearly) to ensure a high ongoing level of security and constantly check for new attack vectors. " + + "

" + + "This report cannot and does not protect against personal or business loss as the result of use of the " + + "applications or systems described. " + parsedModel.Author.Name + " and the Threagile toolkit offers no warranties, representations or " + + "legal certifications concerning the applications or systems it tests. All software includes defects: nothing " + + "in this document is intended to represent or warrant that threat modeling was complete and without error, " + + "nor does this document represent or warrant that the architecture analyzed is suitable to task, free of other " + + "defects than reported, fully compliant with any industry standards, or fully compatible with any operating " + + "system, hardware, or other application. Threat modeling tries to analyze the modeled architecture without " + + "having access to a real working system and thus cannot and does not test the implementation for defects and vulnerabilities. " + + "These kinds of checks would only be possible with a separate code review and penetration test against " + + "a working system and not via a threat model." + + "

" + + "By using the resulting information you agree that " + parsedModel.Author.Name + " and the Threagile toolkit " + + "shall be held harmless in any event." + + "

" + + "This report is confidential and intended for internal, confidential use by the client. The recipient " + + "is obligated to ensure the highly confidential contents are kept secret. The recipient assumes responsibility " + + "for further distribution of this document." + + "

" + + "In this particular project, a time box approach was used to define the analysis effort. This means that the " + + "author allotted a prearranged amount of time to identify and document threats. Because of this, there " + + "is no guarantee that all possible threats and risks are discovered. Furthermore, the analysis " + + "applies to a snapshot of the current state of the modeled architecture (based on the architecture information provided " + + "by the customer) at the examination time." + + "


" + + "Report Distribution" + + "

" + + "Distribution of this report (in full or in part like diagrams or risk findings) requires that this disclaimer " + + "as well as the chapter about the Threagile toolkit and method used is kept intact as part of the " + + "distributed report or referenced from the distributed parts.") + html := r.pdf.HTMLBasicNew() + html.Write(5, disclaimer.String()) + r.pdfColorBlack() +} + +func (r *pdfReporter) createManagementSummary(parsedModel *types.ParsedModel, tempFolder string) error { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + title := "Management Summary" + r.addHeadline(title, false) + r.defineLinkTarget("{management-summary}") + r.currentChapterTitleBreadcrumb = title + countCritical := len(types.FilteredByOnlyCriticalRisks(parsedModel)) + countHigh := len(types.FilteredByOnlyHighRisks(parsedModel)) + countElevated := len(types.FilteredByOnlyElevatedRisks(parsedModel)) + countMedium := len(types.FilteredByOnlyMediumRisks(parsedModel)) + countLow := len(types.FilteredByOnlyLowRisks(parsedModel)) + + countStatusUnchecked := len(types.FilteredByRiskTrackingUnchecked(parsedModel)) + countStatusInDiscussion := len(types.FilteredByRiskTrackingInDiscussion(parsedModel)) + countStatusAccepted := len(types.FilteredByRiskTrackingAccepted(parsedModel)) + countStatusInProgress := len(types.FilteredByRiskTrackingInProgress(parsedModel)) + countStatusMitigated := len(types.FilteredByRiskTrackingMitigated(parsedModel)) + countStatusFalsePositive := len(types.FilteredByRiskTrackingFalsePositive(parsedModel)) + + html := r.pdf.HTMLBasicNew() + html.Write(5, "Threagile toolkit was used to model the architecture of \""+uni(parsedModel.Title)+"\" "+ + "and derive risks by analyzing the components and data flows. The risks identified during this analysis are shown "+ + "in the following chapters. 
Identified risks during threat modeling do not necessarily mean that the "+ + "vulnerability associated with this risk actually exists: it is more to be seen as a list of potential risks and "+ + "threats, which should be individually reviewed and reduced by removing false positives. For the remaining risks it should "+ + "be checked in the design and implementation of \""+uni(parsedModel.Title)+"\" whether the mitigation advices "+ + "have been applied or not."+ + "

"+ + "Each risk finding references a chapter of the OWASP ASVS (Application Security Verification Standard) audit checklist. "+ + "The OWASP ASVS checklist should be considered as an inspiration by architects and developers to further harden "+ + "the application in a Defense-in-Depth approach. Additionally, for each risk finding a "+ + "link towards a matching OWASP Cheat Sheet or similar with technical details about how to implement a mitigation is given."+ + "

"+ + "In total "+strconv.Itoa(types.TotalRiskCount(parsedModel))+" initial risks in "+strconv.Itoa(len(parsedModel.GeneratedRisksByCategory))+" categories have "+ + "been identified during the threat modeling process:

") // TODO plural singular stuff risk/s category/ies has/have + + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "") + ColorRiskStatusUnchecked(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + + ColorCriticalRisk(r.pdf) + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "critical risk", "0", 0, "", false, 0, "") + ColorRiskStatusInDiscussion(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + + ColorHighRisk(r.pdf) + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "high risk", "0", 0, "", false, 0, "") + ColorRiskStatusAccepted(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + + ColorElevatedRisk(r.pdf) + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "elevated risk", "0", 0, "", false, 0, "") + ColorRiskStatusInProgress(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "in progress", "0", 0, "", 
false, 0, "") + r.pdf.Ln(-1) + + ColorMediumRisk(r.pdf) + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "medium risk", "0", 0, "", false, 0, "") + ColorRiskStatusMitigated(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "") + r.pdf.SetFont("Helvetica", "BI", fontSizeBody) + r.pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(-1) + + ColorLowRisk(r.pdf) + r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "low risk", "0", 0, "", false, 0, "") + ColorRiskStatusFalsePositive(r.pdf) + r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "") + r.pdf.SetFont("Helvetica", "BI", fontSizeBody) + r.pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(-1) + + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + // pie chart: risk severity + pieChartRiskSeverity := chart.PieChart{ + Width: 1500, + Height: 1500, + Values: []chart.Value{ + {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorLowRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorLowRisk()), + FontSize: 65}}, + {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorMediumRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorMediumRisk()), + FontSize: 65}}, + {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated", + Style: chart.Style{ + FillColor: 
makeColor(RgbHexColorElevatedRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorElevatedRisk()), + FontSize: 65}}, + {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorHighRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorHighRisk()), + FontSize: 65}}, + {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorCriticalRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorCriticalRisk()), + FontSize: 65}}, + }, + } + + // pie chart: risk status + pieChartRiskStatus := chart.PieChart{ + Width: 1500, + Height: 1500, + Values: []chart.Value{ + {Value: float64(countStatusFalsePositive), //Label: strconv.Itoa(countStatusFalsePositive) + " False Positive", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusFalsePositive()), + FontSize: 65}}, + {Value: float64(countStatusMitigated), //Label: strconv.Itoa(countStatusMitigated) + " Mitigated", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusMitigated()), + FontSize: 65}}, + {Value: float64(countStatusInProgress), //Label: strconv.Itoa(countStatusInProgress) + " InProgress", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusInProgress()), + FontSize: 65}}, + {Value: float64(countStatusAccepted), //Label: strconv.Itoa(countStatusAccepted) + " Accepted", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusAccepted()), + FontSize: 65}}, + {Value: float64(countStatusInDiscussion), //Label: strconv.Itoa(countStatusInDiscussion) + " InDiscussion", + Style: chart.Style{ + FillColor: 
makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusInDiscussion()), + FontSize: 65}}, + {Value: float64(countStatusUnchecked), //Label: strconv.Itoa(countStatusUnchecked) + " Unchecked", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorRiskStatusUnchecked()), + FontSize: 65}}, + }, + } + + y := r.pdf.GetY() + 5 + err := r.embedPieChart(pieChartRiskSeverity, 15.0, y, tempFolder) + if err != nil { + return fmt.Errorf("unable to embed pie chart: %w", err) + } + + err = r.embedPieChart(pieChartRiskStatus, 110.0, y, tempFolder) + if err != nil { + return fmt.Errorf("unable to embed pie chart: %w", err) + } + + // individual management summary comment + r.pdfColorBlack() + if len(parsedModel.ManagementSummaryComment) > 0 { + html.Write(5, "















"+ + parsedModel.ManagementSummaryComment) + } + return nil +} + +func (r *pdfReporter) createRiskMitigationStatus(parsedModel *types.ParsedModel, tempFolder string) error { + r.pdf.SetTextColor(0, 0, 0) + stillAtRisk := types.FilteredByStillAtRisk(parsedModel) + count := len(stillAtRisk) + title := "Risk Mitigation" + r.addHeadline(title, false) + r.defineLinkTarget("{risk-mitigation-status}") + r.currentChapterTitleBreadcrumb = title + + html := r.pdf.HTMLBasicNew() + html.Write(5, "The following chart gives a high-level overview of the risk tracking status (including mitigated risks):") + + risksCritical := types.FilteredByOnlyCriticalRisks(parsedModel) + risksHigh := types.FilteredByOnlyHighRisks(parsedModel) + risksElevated := types.FilteredByOnlyElevatedRisks(parsedModel) + risksMedium := types.FilteredByOnlyMediumRisks(parsedModel) + risksLow := types.FilteredByOnlyLowRisks(parsedModel) + + countStatusUnchecked := len(types.FilteredByRiskTrackingUnchecked(parsedModel)) + countStatusInDiscussion := len(types.FilteredByRiskTrackingInDiscussion(parsedModel)) + countStatusAccepted := len(types.FilteredByRiskTrackingAccepted(parsedModel)) + countStatusInProgress := len(types.FilteredByRiskTrackingInProgress(parsedModel)) + countStatusMitigated := len(types.FilteredByRiskTrackingMitigated(parsedModel)) + countStatusFalsePositive := len(types.FilteredByRiskTrackingFalsePositive(parsedModel)) + + stackedBarChartRiskTracking := chart.StackedBarChart{ + Width: 4000, + //Height: 2500, + XAxis: chart.Style{Show: false, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom}, + YAxis: chart.Style{Show: true, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom}, + Bars: []chart.StackedBar{ + { + Name: types.LowSeverity.Title(), + Width: 130, + Values: []chart.Value{ + {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksLow))), Label: types.Unchecked.Title(), + Style: chart.Style{FillColor: 
makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksLow))), Label: types.InDiscussion.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksLow))), Label: types.Accepted.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksLow))), Label: types.InProgress.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksLow))), Label: types.Mitigated.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksLow))), Label: types.FalsePositive.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + }, + }, + { + Name: types.MediumSeverity.Title(), + Width: 130, + Values: []chart.Value{ + {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksMedium))), Label: types.Unchecked.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksMedium))), Label: types.InDiscussion.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: 
drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksMedium))), Label: types.Accepted.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksMedium))), Label: types.InProgress.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksMedium))), Label: types.Mitigated.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksMedium))), Label: types.FalsePositive.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + }, + }, + { + Name: types.ElevatedSeverity.Title(), + Width: 130, + Values: []chart.Value{ + {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksElevated))), Label: types.Unchecked.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksElevated))), Label: types.InDiscussion.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksElevated))), Label: types.Accepted.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: 
float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksElevated))), Label: types.InProgress.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksElevated))), Label: types.Mitigated.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksElevated))), Label: types.FalsePositive.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + }, + }, + { + Name: types.HighSeverity.Title(), + Width: 130, + Values: []chart.Value{ + {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksHigh))), Label: types.Unchecked.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksHigh))), Label: types.InDiscussion.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksHigh))), Label: types.Accepted.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksHigh))), Label: types.InProgress.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksHigh))), 
Label: types.Mitigated.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksHigh))), Label: types.FalsePositive.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + }, + }, + { + Name: types.CriticalSeverity.Title(), + Width: 130, + Values: []chart.Value{ + {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksCritical))), Label: types.Unchecked.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksCritical))), Label: types.InDiscussion.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksCritical))), Label: types.Accepted.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksCritical))), Label: types.InProgress.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksCritical))), Label: types.Mitigated.Title(), + Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksCritical))), Label: types.FalsePositive.Title(), + Style: 
chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, + }, + }, + }, + } + + y := r.pdf.GetY() + 12 + err := r.embedStackedBarChart(stackedBarChartRiskTracking, 15.0, y, tempFolder) + if err != nil { + return err + } + + // draw the X-Axis legend on my own + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorBlack() + r.pdf.Text(24.02, 169, "Low ("+strconv.Itoa(len(risksLow))+")") + r.pdf.Text(46.10, 169, "Medium ("+strconv.Itoa(len(risksMedium))+")") + r.pdf.Text(69.74, 169, "Elevated ("+strconv.Itoa(len(risksElevated))+")") + r.pdf.Text(97.95, 169, "High ("+strconv.Itoa(len(risksHigh))+")") + r.pdf.Text(121.65, 169, "Critical ("+strconv.Itoa(len(risksCritical))+")") + + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(20) + + ColorRiskStatusUnchecked(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorRiskStatusInDiscussion(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorRiskStatusAccepted(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorRiskStatusInProgress(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorRiskStatusMitigated(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, 
strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "") + r.pdf.SetFont("Helvetica", "BI", fontSizeBody) + r.pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(-1) + ColorRiskStatusFalsePositive(r.pdf) + r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "") + r.pdf.SetFont("Helvetica", "BI", fontSizeBody) + r.pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(-1) + + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + r.pdfColorBlack() + if count == 0 { + html.Write(5, "














"+ + "After removal of risks with status mitigated and false positive "+ + ""+strconv.Itoa(count)+" remain unmitigated.") + } else { + html.Write(5, "














"+ + "After removal of risks with status mitigated and false positive "+ + "the following "+strconv.Itoa(count)+" remain unmitigated:") + + countCritical := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyCriticalRisks(parsedModel))) + countHigh := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyHighRisks(parsedModel))) + countElevated := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyElevatedRisks(parsedModel))) + countMedium := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyMediumRisks(parsedModel))) + countLow := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyLowRisks(parsedModel))) + + countBusinessSide := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyBusinessSide(parsedModel))) + countArchitecture := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyArchitecture(parsedModel))) + countDevelopment := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyDevelopment(parsedModel))) + countOperation := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyOperation(parsedModel))) + + pieChartRemainingRiskSeverity := chart.PieChart{ + Width: 1500, + Height: 1500, + Values: []chart.Value{ + {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorLowRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorLowRisk()), + FontSize: 65}}, + {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorMediumRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorMediumRisk()), + FontSize: 65}}, + {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorElevatedRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorElevatedRisk()), + FontSize: 65}}, + {Value: float64(countHigh), 
//Label: strconv.Itoa(countHigh) + " High", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorHighRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorHighRisk()), + FontSize: 65}}, + {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical", + Style: chart.Style{ + FillColor: makeColor(RgbHexColorCriticalRisk()).WithAlpha(98), + //FontColor: makeColor(RgbHexColorCriticalRisk()), + FontSize: 65}}, + }, + } + + pieChartRemainingRisksByFunction := chart.PieChart{ + Width: 1500, + Height: 1500, + Values: []chart.Value{ + {Value: float64(countBusinessSide), + Style: chart.Style{ + FillColor: makeColor(RgbHexColorBusiness()).WithAlpha(98), + FontSize: 65}}, + {Value: float64(countArchitecture), + Style: chart.Style{ + FillColor: makeColor(RgbHexColorArchitecture()).WithAlpha(98), + FontSize: 65}}, + {Value: float64(countDevelopment), + Style: chart.Style{ + FillColor: makeColor(RgbHexColorDevelopment()).WithAlpha(98), + FontSize: 65}}, + {Value: float64(countOperation), + Style: chart.Style{ + FillColor: makeColor(RgbHexColorOperation()).WithAlpha(98), + FontSize: 65}}, + }, + } + + _ = r.embedPieChart(pieChartRemainingRiskSeverity, 15.0, 216, tempFolder) + _ = r.embedPieChart(pieChartRemainingRisksByFunction, 110.0, 216, tempFolder) + + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.Ln(8) + + ColorCriticalRisk(r.pdf) + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unmitigated critical risk", "0", 0, "", false, 0, "") + r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, "", "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorHighRisk(r.pdf) + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unmitigated high risk", "0", 0, 
"", false, 0, "") + ColorBusiness(r.pdf) + r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countBusinessSide), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "business side related", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorElevatedRisk(r.pdf) + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unmitigated elevated risk", "0", 0, "", false, 0, "") + ColorArchitecture(r.pdf) + r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countArchitecture), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "architecture related", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorMediumRisk(r.pdf) + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unmitigated medium risk", "0", 0, "", false, 0, "") + ColorDevelopment(r.pdf) + r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countDevelopment), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "development related", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + ColorLowRisk(r.pdf) + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "unmitigated low risk", "0", 0, "", false, 0, "") + ColorOperation(r.pdf) + r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(10, 6, strconv.Itoa(countOperation), "0", 0, "R", false, 0, "") + r.pdf.CellFormat(60, 6, "operations related", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + } + return nil +} + +// CAUTION: Long labels might cause endless loop, then remove labels and render them manually later inside the PDF +func (r *pdfReporter) embedStackedBarChart(sbcChart 
chart.StackedBarChart, x float64, y float64, tempFolder string) error { + tmpFilePNG, err := os.CreateTemp(tempFolder, "chart-*-.png") + if err != nil { + return fmt.Errorf("error creating temporary file for chart: %w", err) + } + defer func() { _ = os.Remove(tmpFilePNG.Name()) }() + file, _ := os.Create(tmpFilePNG.Name()) + defer func() { _ = file.Close() }() + err = sbcChart.Render(chart.PNG, file) + if err != nil { + return fmt.Errorf("error rendering chart: %w", err) + } + var options gofpdf.ImageOptions + options.ImageType = "" + r.pdf.RegisterImage(tmpFilePNG.Name(), "") + r.pdf.ImageOptions(tmpFilePNG.Name(), x, y, 0, 110, false, options, 0, "") + return nil +} + +func (r *pdfReporter) embedPieChart(pieChart chart.PieChart, x float64, y float64, tempFolder string) error { + tmpFilePNG, err := os.CreateTemp(tempFolder, "chart-*-.png") + if err != nil { + return fmt.Errorf("error creating temporary file for chart: %w", err) + } + defer func() { _ = os.Remove(tmpFilePNG.Name()) }() + file, err := os.Create(tmpFilePNG.Name()) + if err != nil { + return fmt.Errorf("error creating temporary file for chart: %w", err) + } + defer func() { _ = file.Close() }() + err = pieChart.Render(chart.PNG, file) + if err != nil { + return fmt.Errorf("error rendering chart: %w", err) + } + var options gofpdf.ImageOptions + options.ImageType = "" + r.pdf.RegisterImage(tmpFilePNG.Name(), "") + r.pdf.ImageOptions(tmpFilePNG.Name(), x, y, 60, 0, false, options, 0, "") + return nil +} + +func makeColor(hexColor string) drawing.Color { + _, i := utf8.DecodeRuneInString(hexColor) + return drawing.ColorFromHex(hexColor[i:]) // = remove first char, which is # in rgb hex here +} + +func (r *pdfReporter) createImpactInitialRisks(parsedModel *types.ParsedModel) { + r.renderImpactAnalysis(parsedModel, true) +} + +func (r *pdfReporter) createImpactRemainingRisks(parsedModel *types.ParsedModel) { + r.renderImpactAnalysis(parsedModel, false) +} + +func (r *pdfReporter) 
renderImpactAnalysis(parsedModel *types.ParsedModel, initialRisks bool) { + r.pdf.SetTextColor(0, 0, 0) + count, catCount := types.TotalRiskCount(parsedModel), len(parsedModel.GeneratedRisksByCategory) + if !initialRisks { + count, catCount = len(types.FilteredByStillAtRisk(parsedModel)), len(types.CategoriesOfOnlyRisksStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory)) + } + riskStr, catStr := "Risks", "Categories" + if count == 1 { + riskStr = "Risk" + } + if catCount == 1 { + catStr = "Category" + } + if initialRisks { + chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Initial " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{impact-analysis-initial-risks}") + r.currentChapterTitleBreadcrumb = chapTitle + } else { + chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Remaining " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{impact-analysis-remaining-risks}") + r.currentChapterTitleBreadcrumb = chapTitle + } + + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + riskStr = "risks" + if count == 1 { + riskStr = "risk" + } + initialStr := "initial" + if !initialRisks { + initialStr = "remaining" + } + strBuilder.WriteString("The most prevalent impacts of the " + strconv.Itoa(count) + " " + + initialStr + " " + riskStr + " (distributed over " + strconv.Itoa(catCount) + " risk categories) are " + + "(taking the severity ratings into account and using the highest for each category):
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)), + types.CriticalSeverity, false, initialRisks, true, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)), + types.HighSeverity, false, initialRisks, true, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)), + types.ElevatedSeverity, false, initialRisks, true, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)), + types.MediumSeverity, false, initialRisks, true, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)), + types.LowSeverity, false, initialRisks, true, false) + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +func (r *pdfReporter) createOutOfScopeAssets(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + assets := "Assets" + count := len(parsedModel.OutOfScopeTechnicalAssets()) + if count == 1 { + assets = "Asset" + } + chapTitle := "Out-of-Scope Assets: " + strconv.Itoa(count) + " " + assets + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{out-of-scope-assets}") + r.currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + 
var strBuilder strings.Builder + strBuilder.WriteString("This chapter lists all technical assets that have been defined as out-of-scope. " + + "Each one should be checked in the model whether it should better be included in the " + + "overall risk analysis:
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + outOfScopeAssetCount := 0 + for _, technicalAsset := range sortedTechnicalAssetsByRAAAndTitle(parsedModel) { + if technicalAsset.OutOfScope { + outOfScopeAssetCount++ + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + strBuilder.WriteString("

") + } + html.Write(5, strBuilder.String()) + strBuilder.Reset() + posY := r.pdf.GetY() + r.pdfColorOutOfScope() + strBuilder.WriteString("") + strBuilder.WriteString(uni(technicalAsset.Title)) + strBuilder.WriteString("") + strBuilder.WriteString(": out-of-scope") + strBuilder.WriteString("
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetTextColor(0, 0, 0) + strBuilder.WriteString(uni(technicalAsset.JustificationOutOfScope)) + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[technicalAsset.Id]) + } + } + + if outOfScopeAssetCount == 0 { + r.pdfColorGray() + html.Write(5, "

No technical assets have been defined as out-of-scope.") + } + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +func sortedTechnicalAssetsByRAAAndTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset { + assets := make([]types.TechnicalAsset, 0) + for _, asset := range parsedModel.TechnicalAssets { + assets = append(assets, asset) + } + sort.Sort(types.ByTechnicalAssetRAAAndTitleSort(assets)) + return assets +} + +func (r *pdfReporter) createModelFailures(parsedModel *types.ParsedModel) { + r.pdf.SetTextColor(0, 0, 0) + modelFailures := types.FlattenRiskSlice(types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory)) + risksStr := "Risks" + count := len(modelFailures) + if count == 1 { + risksStr = "Risk" + } + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, modelFailures)) + if countStillAtRisk > 0 { + ColorModelFailure(r.pdf) + } + chapTitle := "Potential Model Failures: " + strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(count) + " " + risksStr + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{model-failures}") + r.currentChapterTitleBreadcrumb = chapTitle + r.pdfColorBlack() + + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + strBuilder.WriteString("This chapter lists potential model failures where not all relevant assets have been " + + "modeled or the model might itself contain inconsistencies. Each potential model failure should be checked " + + "in the model against the architecture design:
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + modelFailuresByCategory := types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory) + if len(modelFailuresByCategory) == 0 { + r.pdfColorGray() + html.Write(5, "

No potential model failures have been identified.") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, modelFailuresByCategory, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, modelFailuresByCategory, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, modelFailuresByCategory, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, modelFailuresByCategory, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, modelFailuresByCategory, true)), + types.LowSeverity, true, true, false, true) + } + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +func (r *pdfReporter) createRAA(parsedModel *types.ParsedModel, introTextRAA string) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + chapTitle := "RAA Analysis" + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{raa-analysis}") + r.currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + strBuilder.WriteString(introTextRAA) + strBuilder.WriteString("
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + for _, technicalAsset := range sortedTechnicalAssetsByRAAAndTitle(parsedModel) { + if technicalAsset.OutOfScope { + continue + } + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + strBuilder.WriteString("

") + } + newRisksStr := technicalAsset.GeneratedRisks(parsedModel) + switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) { + case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 { + r.pdfColorBlack() + } + + html.Write(5, strBuilder.String()) + strBuilder.Reset() + posY := r.pdf.GetY() + strBuilder.WriteString("") + strBuilder.WriteString(uni(technicalAsset.Title)) + strBuilder.WriteString("") + if technicalAsset.OutOfScope { + strBuilder.WriteString(": out-of-scope") + } else { + strBuilder.WriteString(": RAA ") + strBuilder.WriteString(fmt.Sprintf("%.0f", technicalAsset.RAA)) + strBuilder.WriteString("%") + } + strBuilder.WriteString("
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetTextColor(0, 0, 0) + strBuilder.WriteString(uni(technicalAsset.Description)) + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[technicalAsset.Id]) + } + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +/* +func createDataRiskQuickWins() { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + assets := "assets" + count := len(model.SortedTechnicalAssetsByQuickWinsAndTitle()) + if count == 1 { + assets = "asset" + } + chapTitle := "Data Risk Quick Wins: " + strconv.Itoa(count) + " " + assets + r.addHeadline(chapTitle, false) + defineLinkTarget("{data-risk-quick-wins}") + currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + strBuilder.WriteString("For each technical asset it was checked how many data assets at risk might " + + "get their risk-rating reduced (partly or fully) when the risks of the technical asset are mitigated. " + + "In general, that means the higher the quick win value is, the more data assets (left side of the Data Risk Mapping diagram) " + + "turn from red to amber or from amber to blue by mitigating the technical asset's risks. " + + "This list can be used to prioritize on efforts with the greatest effects of reducing data asset risks:
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + for _, technicalAsset := range model.SortedTechnicalAssetsByQuickWinsAndTitle() { + quickWins := technicalAsset.QuickWins() + if r.pdf.GetY() > 260 { + r.pageBreak() + r.pdf.SetY(36) + } else { + strBuilder.WriteString("

") + } + risks := technicalAsset.GeneratedRisks() + switch model.HighestSeverityStillAtRisk(risks) { + case model.High: + ColorHighRisk(r.pdf) + case model.Medium: + ColorMediumRisk(r.pdf) + case model.Low: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { + r.pdfColorBlack() + } + + html.Write(5, strBuilder.String()) + strBuilder.Reset() + posY := r.pdf.GetY() + strBuilder.WriteString("") + strBuilder.WriteString(uni(technicalAsset.Title)) + strBuilder.WriteString("") + strBuilder.WriteString(": ") + strBuilder.WriteString(fmt.Sprintf("%.2f", quickWins)) + strBuilder.WriteString(" Quick Wins") + strBuilder.WriteString("
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetTextColor(0, 0, 0) + strBuilder.WriteString(uni(technicalAsset.Description)) + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id]) + } + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} +*/ + +func (r *pdfReporter) addCategories(parsedModel *types.ParsedModel, riskCategories []types.RiskCategory, severity types.RiskSeverity, bothInitialAndRemainingRisks bool, initialRisks bool, describeImpact bool, describeDescription bool) { + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + sort.Sort(types.ByRiskCategoryTitleSort(riskCategories)) + for _, riskCategory := range riskCategories { + risksStr := parsedModel.GeneratedRisksByCategory[riskCategory.Id] + if !initialRisks { + risksStr = types.ReduceToOnlyStillAtRisk(parsedModel, risksStr) + } + if len(risksStr) == 0 { + continue + } + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + strBuilder.WriteString("

") + } + var prefix string + switch severity { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + prefix = "Critical: " + case types.HighSeverity: + ColorHighRisk(r.pdf) + prefix = "High: " + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + prefix = "Elevated: " + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + prefix = "Medium: " + case types.LowSeverity: + ColorLowRisk(r.pdf) + prefix = "Low: " + default: + r.pdfColorBlack() + prefix = "" + } + switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 { + r.pdfColorBlack() + } + html.Write(5, strBuilder.String()) + strBuilder.Reset() + posY := r.pdf.GetY() + strBuilder.WriteString(prefix) + strBuilder.WriteString("") + strBuilder.WriteString(riskCategory.Title) + strBuilder.WriteString(": ") + count := len(risksStr) + initialStr := "Initial" + if !initialRisks { + initialStr = "Remaining" + } + remainingRisks := types.ReduceToOnlyStillAtRisk(parsedModel, risksStr) + suffix := strconv.Itoa(count) + " " + initialStr + " Risk" + if bothInitialAndRemainingRisks { + suffix = strconv.Itoa(len(remainingRisks)) + " / " + strconv.Itoa(count) + " Risk" + } + if count != 1 { + suffix += "s" + } + suffix += " - Exploitation likelihood is " + if initialRisks { + suffix += types.HighestExploitationLikelihood(risksStr).Title() + " with " + types.HighestExploitationImpact(risksStr).Title() + " impact." + } else { + suffix += types.HighestExploitationLikelihood(remainingRisks).Title() + " with " + types.HighestExploitationImpact(remainingRisks).Title() + " impact." + } + strBuilder.WriteString(suffix + "
") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.SetTextColor(0, 0, 0) + if describeImpact { + strBuilder.WriteString(firstParagraph(riskCategory.Impact)) + } else if describeDescription { + strBuilder.WriteString(firstParagraph(riskCategory.Description)) + } else { + strBuilder.WriteString(firstParagraph(riskCategory.Mitigation)) + } + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[riskCategory.Id]) + } +} + +func firstParagraph(text string) string { + firstParagraphRegEx := regexp.MustCompile(`(.*?)((
)|(

))`) + match := firstParagraphRegEx.FindStringSubmatch(text) + if len(match) == 0 { + return text + } + return match[1] +} + +func (r *pdfReporter) createAssignmentByFunction(parsedModel *types.ParsedModel) { + r.pdf.SetTextColor(0, 0, 0) + title := "Assignment by Function" + r.addHeadline(title, false) + r.defineLinkTarget("{function-assignment}") + r.currentChapterTitleBreadcrumb = title + + risksBusinessSideFunction := types.RisksOfOnlyBusinessSide(parsedModel, parsedModel.GeneratedRisksByCategory) + risksArchitectureFunction := types.RisksOfOnlyArchitecture(parsedModel, parsedModel.GeneratedRisksByCategory) + risksDevelopmentFunction := types.RisksOfOnlyDevelopment(parsedModel, parsedModel.GeneratedRisksByCategory) + risksOperationFunction := types.RisksOfOnlyOperation(parsedModel, parsedModel.GeneratedRisksByCategory) + + countBusinessSideFunction := types.CountRisks(risksBusinessSideFunction) + countArchitectureFunction := types.CountRisks(risksArchitectureFunction) + countDevelopmentFunction := types.CountRisks(risksDevelopmentFunction) + countOperationFunction := types.CountRisks(risksOperationFunction) + var intro strings.Builder + intro.WriteString("This chapter clusters and assigns the risks by functions which are most likely able to " + + "check and mitigate them: " + + "In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " + + "of which " + strconv.Itoa(countBusinessSideFunction) + " should be checked by " + types.BusinessSide.Title() + ", " + + "" + strconv.Itoa(countArchitectureFunction) + " should be checked by " + types.Architecture.Title() + ", " + + "" + strconv.Itoa(countDevelopmentFunction) + " should be checked by " + types.Development.Title() + ", " + + "and " + strconv.Itoa(countOperationFunction) + " should be checked by " + types.Operations.Title() + ".
") + html := r.pdf.HTMLBasicNew() + html.Write(5, intro.String()) + intro.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + oldLeft, _, _, _ := r.pdf.GetMargins() + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.BusinessSide.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksBusinessSideFunction) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksBusinessSideFunction, true)), + types.CriticalSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksBusinessSideFunction, true)), + types.HighSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksBusinessSideFunction, true)), + types.ElevatedSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksBusinessSideFunction, true)), + types.MediumSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksBusinessSideFunction, true)), + types.LowSeverity, true, true, false, false) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Architecture.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksArchitectureFunction) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksArchitectureFunction, true)), + types.CriticalSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksArchitectureFunction, true)), + types.HighSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksArchitectureFunction, true)), + types.ElevatedSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksArchitectureFunction, true)), + types.MediumSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksArchitectureFunction, true)), + types.LowSeverity, true, true, false, false) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Development.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksDevelopmentFunction) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksDevelopmentFunction, true)), + types.CriticalSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksDevelopmentFunction, true)), + types.HighSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksDevelopmentFunction, true)), + types.ElevatedSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksDevelopmentFunction, true)), + types.MediumSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksDevelopmentFunction, true)), + types.LowSeverity, true, true, false, false) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Operations.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksOperationFunction) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksOperationFunction, true)), + types.CriticalSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksOperationFunction, true)), + types.HighSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksOperationFunction, true)), + types.ElevatedSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksOperationFunction, true)), + types.MediumSeverity, true, true, false, false) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksOperationFunction, true)), + types.LowSeverity, true, true, false, false) + } + r.pdf.SetLeftMargin(oldLeft) + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +func (r *pdfReporter) createSTRIDE(parsedModel *types.ParsedModel) { + r.pdf.SetTextColor(0, 0, 0) + title := "STRIDE Classification of Identified Risks" + r.addHeadline(title, false) + r.defineLinkTarget("{stride}") + r.currentChapterTitleBreadcrumb = title + + risksSTRIDESpoofing := types.RisksOfOnlySTRIDESpoofing(parsedModel, parsedModel.GeneratedRisksByCategory) + risksSTRIDETampering := types.RisksOfOnlySTRIDETampering(parsedModel, parsedModel.GeneratedRisksByCategory) + risksSTRIDERepudiation := types.RisksOfOnlySTRIDERepudiation(parsedModel, parsedModel.GeneratedRisksByCategory) + risksSTRIDEInformationDisclosure := types.RisksOfOnlySTRIDEInformationDisclosure(parsedModel, parsedModel.GeneratedRisksByCategory) + risksSTRIDEDenialOfService := types.RisksOfOnlySTRIDEDenialOfService(parsedModel, parsedModel.GeneratedRisksByCategory) + risksSTRIDEElevationOfPrivilege := 
types.RisksOfOnlySTRIDEElevationOfPrivilege(parsedModel, parsedModel.GeneratedRisksByCategory) + + countSTRIDESpoofing := types.CountRisks(risksSTRIDESpoofing) + countSTRIDETampering := types.CountRisks(risksSTRIDETampering) + countSTRIDERepudiation := types.CountRisks(risksSTRIDERepudiation) + countSTRIDEInformationDisclosure := types.CountRisks(risksSTRIDEInformationDisclosure) + countSTRIDEDenialOfService := types.CountRisks(risksSTRIDEDenialOfService) + countSTRIDEElevationOfPrivilege := types.CountRisks(risksSTRIDEElevationOfPrivilege) + var intro strings.Builder + intro.WriteString("This chapter clusters and classifies the risks by STRIDE categories: " + + "In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " + + "of which " + strconv.Itoa(countSTRIDESpoofing) + " in the " + types.Spoofing.Title() + " category, " + + "" + strconv.Itoa(countSTRIDETampering) + " in the " + types.Tampering.Title() + " category, " + + "" + strconv.Itoa(countSTRIDERepudiation) + " in the " + types.Repudiation.Title() + " category, " + + "" + strconv.Itoa(countSTRIDEInformationDisclosure) + " in the " + types.InformationDisclosure.Title() + " category, " + + "" + strconv.Itoa(countSTRIDEDenialOfService) + " in the " + types.DenialOfService.Title() + " category, " + + "and " + strconv.Itoa(countSTRIDEElevationOfPrivilege) + " in the " + types.ElevationOfPrivilege.Title() + " category.
") + html := r.pdf.HTMLBasicNew() + html.Write(5, intro.String()) + intro.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + oldLeft, _, _, _ := r.pdf.GetMargins() + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Spoofing.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDESpoofing) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDESpoofing, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDESpoofing, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDESpoofing, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDESpoofing, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDESpoofing, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Tampering.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDETampering) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDETampering, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDETampering, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDETampering, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDETampering, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDETampering, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.Repudiation.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDERepudiation) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDERepudiation, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDERepudiation, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDERepudiation, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDERepudiation, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDERepudiation, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.InformationDisclosure.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDEInformationDisclosure) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEInformationDisclosure, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEInformationDisclosure, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEInformationDisclosure, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEInformationDisclosure, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEInformationDisclosure, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.DenialOfService.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDEDenialOfService) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEDenialOfService, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEDenialOfService, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEDenialOfService, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEDenialOfService, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEDenialOfService, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetTextColor(0, 0, 0) + html.Write(5, ""+types.ElevationOfPrivilege.Title()+"") + r.pdf.SetLeftMargin(15) + if len(risksSTRIDEElevationOfPrivilege) == 0 { + r.pdf.SetTextColor(150, 150, 150) + html.Write(5, "

n/a") + } else { + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)), + types.CriticalSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)), + types.HighSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)), + types.ElevatedSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)), + types.MediumSeverity, true, true, false, true) + r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)), + types.LowSeverity, true, true, false, true) + } + r.pdf.SetLeftMargin(oldLeft) + + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) +} + +func (r *pdfReporter) createSecurityRequirements(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + chapTitle := "Security Requirements" + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{security-requirements}") + r.currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + html.Write(5, "This chapter lists the custom security requirements which have been defined for the modeled target.") + r.pdfColorBlack() + for _, title := range sortedKeysOfSecurityRequirements(parsedModel) { + description := parsedModel.SecurityRequirements[title] + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + html.Write(5, ""+uni(title)+"
") + html.Write(5, uni(description)) + } + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + html.Write(5, "This list is not complete and regulatory or law relevant security requirements have to be "+ + "taken into account as well. Also custom individual security requirements might exist for the project.") +} + +func sortedKeysOfSecurityRequirements(parsedModel *types.ParsedModel) []string { + keys := make([]string, 0) + for k := range parsedModel.SecurityRequirements { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (r *pdfReporter) createAbuseCases(parsedModel *types.ParsedModel) { + r.pdf.SetTextColor(0, 0, 0) + chapTitle := "Abuse Cases" + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{abuse-cases}") + r.currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + html.Write(5, "This chapter lists the custom abuse cases which have been defined for the modeled target.") + r.pdfColorBlack() + for _, title := range sortedKeysOfAbuseCases(parsedModel) { + description := parsedModel.AbuseCases[title] + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + html.Write(5, ""+title+"
") + html.Write(5, description) + } + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + html.Write(5, "This list is not complete and regulatory or law relevant abuse cases have to be "+ + "taken into account as well. Also custom individual abuse cases might exist for the project.") +} + +func sortedKeysOfAbuseCases(parsedModel *types.ParsedModel) []string { + keys := make([]string, 0) + for k := range parsedModel.AbuseCases { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (r *pdfReporter) createQuestions(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + questions := "Questions" + count := len(parsedModel.Questions) + if count == 1 { + questions = "Question" + } + if questionsUnanswered(parsedModel) > 0 { + ColorModelFailure(r.pdf) + } + chapTitle := "Questions: " + strconv.Itoa(questionsUnanswered(parsedModel)) + " / " + strconv.Itoa(count) + " " + questions + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{questions}") + r.currentChapterTitleBreadcrumb = chapTitle + r.pdfColorBlack() + + html := r.pdf.HTMLBasicNew() + html.Write(5, "This chapter lists custom questions that arose during the threat modeling process.") + + if len(parsedModel.Questions) == 0 { + r.pdfColorLightGray() + html.Write(5, "


") + html.Write(5, "No custom questions arose during the threat modeling process.") + } + r.pdfColorBlack() + for _, question := range sortedKeysOfQuestions(parsedModel) { + answer := parsedModel.Questions[question] + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdfColorBlack() + if len(strings.TrimSpace(answer)) > 0 { + html.Write(5, ""+uni(question)+"
") + html.Write(5, ""+uni(strings.TrimSpace(answer))+"") + } else { + ColorModelFailure(r.pdf) + html.Write(5, ""+uni(question)+"
") + r.pdfColorLightGray() + html.Write(5, "- answer pending -") + r.pdfColorBlack() + } + } +} + +func sortedKeysOfQuestions(parsedModel *types.ParsedModel) []string { + keys := make([]string, 0) + for k := range parsedModel.Questions { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (r *pdfReporter) createTagListing(parsedModel *types.ParsedModel) { + r.pdf.SetTextColor(0, 0, 0) + chapTitle := "Tag Listing" + r.addHeadline(chapTitle, false) + r.defineLinkTarget("{tag-listing}") + r.currentChapterTitleBreadcrumb = chapTitle + + html := r.pdf.HTMLBasicNew() + html.Write(5, "This chapter lists what tags are used by which elements.") + r.pdfColorBlack() + sorted := parsedModel.TagsAvailable + sort.Strings(sorted) + for _, tag := range sorted { + description := "" // TODO: add some separation texts to distinguish between technical assets and data assets etc. for example? + for _, techAsset := range sortedTechnicalAssetsByTitle(parsedModel) { + if contains(techAsset.Tags, tag) { + if len(description) > 0 { + description += ", " + } + description += techAsset.Title + } + for _, commLink := range techAsset.CommunicationLinksSorted() { + if contains(commLink.Tags, tag) { + if len(description) > 0 { + description += ", " + } + description += commLink.Title + } + } + } + for _, dataAsset := range sortedDataAssetsByTitle(parsedModel) { + if contains(dataAsset.Tags, tag) { + if len(description) > 0 { + description += ", " + } + description += dataAsset.Title + } + } + for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) { + if contains(trustBoundary.Tags, tag) { + if len(description) > 0 { + description += ", " + } + description += trustBoundary.Title + } + } + for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) { + if contains(sharedRuntime.Tags, tag) { + if len(description) > 0 { + description += ", " + } + description += sharedRuntime.Title + } + } + if len(description) > 0 { + if r.pdf.GetY() > 250 { + 
r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdfColorBlack() + html.Write(5, ""+tag+"
") + html.Write(5, description) + } + } +} + +func sortedSharedRuntimesByTitle(parsedModel *types.ParsedModel) []types.SharedRuntime { + result := make([]types.SharedRuntime, 0) + for _, runtime := range parsedModel.SharedRuntimes { + result = append(result, runtime) + } + sort.Sort(types.BySharedRuntimeTitleSort(result)) + return result +} + +func sortedTechnicalAssetsByTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset { + assets := make([]types.TechnicalAsset, 0) + for _, asset := range parsedModel.TechnicalAssets { + assets = append(assets, asset) + } + sort.Sort(types.ByTechnicalAssetTitleSort(assets)) + return assets +} + +func (r *pdfReporter) createRiskCategories(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + // category title + title := "Identified Risks by Vulnerability Category" + r.pdfColorBlack() + r.addHeadline(title, false) + r.defineLinkTarget("{intro-risks-by-vulnerability-category}") + html := r.pdf.HTMLBasicNew() + var text strings.Builder + text.WriteString("In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " + + "of which " + + "" + strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel))) + " are rated as critical, " + + "" + strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel))) + " as high, " + + "" + strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel))) + " as elevated, " + + "" + strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel))) + " as medium, " + + "and " + strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel))) + " as low. " + + "

These risks are distributed across " + strconv.Itoa(len(parsedModel.GeneratedRisksByCategory)) + " vulnerability categories. ") + text.WriteString("The following sub-chapters of this section describe each identified risk category.") // TODO more explanation text + html.Write(5, text.String()) + text.Reset() + r.currentChapterTitleBreadcrumb = title + for _, category := range types.SortedRiskCategories(parsedModel) { + risksStr := types.SortedRisksOfCategory(parsedModel, category) + + // category color + switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 { + r.pdfColorBlack() + } + + // category title + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk" + if len(risksStr) != 1 { + suffix += "s" + } + title := category.Title + ": " + suffix + r.addHeadline(uni(title), true) + r.pdfColorBlack() + r.defineLinkTarget("{" + category.Id + "}") + r.currentChapterTitleBreadcrumb = title + + // category details + var text strings.Builder + cweLink := "n/a" + if category.CWE > 0 { + cweLink = "CWE " + + strconv.Itoa(category.CWE) + "" + } + text.WriteString("Description (" + category.STRIDE.Title() + "): " + cweLink + "

") + text.WriteString(category.Description) + text.WriteString("


Impact

") + text.WriteString(category.Impact) + text.WriteString("


Detection Logic

") + text.WriteString(category.DetectionLogic) + text.WriteString("


Risk Rating

") + text.WriteString(category.RiskAssessment) + html.Write(5, text.String()) + text.Reset() + ColorRiskStatusFalsePositive(r.pdf) + text.WriteString("


False Positives

") + text.WriteString(category.FalsePositives) + html.Write(5, text.String()) + text.Reset() + ColorRiskStatusMitigated(r.pdf) + text.WriteString("


Mitigation (" + category.Function.Title() + "): " + category.Action + "

") + text.WriteString(category.Mitigation) + + asvsChapter := category.ASVS + if len(asvsChapter) == 0 { + text.WriteString("

ASVS Chapter: n/a") + } else { + text.WriteString("

ASVS Chapter: " + asvsChapter + "") + } + + cheatSheetLink := category.CheatSheet + if len(cheatSheetLink) == 0 { + cheatSheetLink = "n/a" + } else { + lastLinkParts := strings.Split(cheatSheetLink, "/") + linkText := lastLinkParts[len(lastLinkParts)-1] + if strings.HasSuffix(linkText, ".html") || strings.HasSuffix(linkText, ".htm") { + var extension = filepath.Ext(linkText) + linkText = linkText[0 : len(linkText)-len(extension)] + } + cheatSheetLink = "" + linkText + "" + } + text.WriteString("
Cheat Sheet: " + cheatSheetLink) + + text.WriteString("


Check

") + text.WriteString(category.Check) + + html.Write(5, text.String()) + text.Reset() + r.pdf.SetTextColor(0, 0, 0) + + // risk details + r.pageBreak() + r.pdf.SetY(36) + text.WriteString("Risk Findings

") + times := strconv.Itoa(len(risksStr)) + " time" + if len(risksStr) > 1 { + times += "s" + } + text.WriteString("The risk " + category.Title + " was found " + times + " in the analyzed architecture to be " + + "potentially possible. Each spot should be checked individually by reviewing the implementation whether all " + + "controls have been applied properly in order to mitigate each risk.
") + html.Write(5, text.String()) + text.Reset() + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.
") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + oldLeft, _, _, _ := r.pdf.GetMargins() + headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false + for _, risk := range risksStr { + text.WriteString("
") + html.Write(5, text.String()) + text.Reset() + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } + switch risk.Severity { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + if !headlineCriticalWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft) + text.WriteString("
Critical Risk Severity

") + html.Write(5, text.String()) + text.Reset() + headlineCriticalWritten = true + } + case types.HighSeverity: + ColorHighRisk(r.pdf) + if !headlineHighWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft) + text.WriteString("
High Risk Severity

") + html.Write(5, text.String()) + text.Reset() + headlineHighWritten = true + } + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + if !headlineElevatedWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft) + text.WriteString("
Elevated Risk Severity

") + html.Write(5, text.String()) + text.Reset() + headlineElevatedWritten = true + } + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + if !headlineMediumWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft) + text.WriteString("
Medium Risk Severity

") + html.Write(5, text.String()) + text.Reset() + headlineMediumWritten = true + } + case types.LowSeverity: + ColorLowRisk(r.pdf) + if !headlineLowWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft) + text.WriteString("
Low Risk Severity

") + html.Write(5, text.String()) + text.Reset() + headlineLowWritten = true + } + default: + r.pdfColorBlack() + } + if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + r.pdfColorBlack() + } + posY := r.pdf.GetY() + r.pdf.SetLeftMargin(oldLeft + 10) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.") + text.WriteString("
") + html.Write(5, text.String()) + text.Reset() + r.pdfColorGray() + r.pdf.SetFont("Helvetica", "", fontSizeVerySmall) + r.pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + if len(risk.MostRelevantSharedRuntimeId) > 0 { + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantSharedRuntimeId]) + } else if len(risk.MostRelevantTrustBoundaryId) > 0 { + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantTrustBoundaryId]) + } else if len(risk.MostRelevantTechnicalAssetId) > 0 { + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantTechnicalAssetId]) + } + r.writeRiskTrackingStatus(parsedModel, risk) + r.pdf.SetLeftMargin(oldLeft) + html.Write(5, text.String()) + text.Reset() + } + r.pdf.SetLeftMargin(oldLeft) + } +} + +func (r *pdfReporter) writeRiskTrackingStatus(parsedModel *types.ParsedModel, risk types.Risk) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + tracking := risk.GetRiskTracking(parsedModel) + r.pdfColorBlack() + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + switch tracking.Status { + case types.Unchecked: + ColorRiskStatusUnchecked(r.pdf) + case types.InDiscussion: + ColorRiskStatusInDiscussion(r.pdf) + case types.Accepted: + ColorRiskStatusAccepted(r.pdf) + case types.InProgress: + ColorRiskStatusInProgress(r.pdf) + case types.Mitigated: + ColorRiskStatusMitigated(r.pdf) + case types.FalsePositive: + ColorRiskStatusFalsePositive(r.pdf) + default: + r.pdfColorBlack() + } + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + if tracking.Status == types.Unchecked { + r.pdf.SetFont("Helvetica", "B", fontSizeSmall) + } + r.pdf.CellFormat(25, 4, tracking.Status.Title(), "0", 0, "B", false, 0, "") + if tracking.Status != types.Unchecked { + dateStr := tracking.Date.Format("2006-01-02") + if dateStr == "0001-01-01" { + dateStr = "" + } + justificationStr := tracking.Justification + r.pdfColorGray() 
+ r.pdf.CellFormat(20, 4, dateStr, "0", 0, "B", false, 0, "") + r.pdf.CellFormat(35, 4, uni(tracking.CheckedBy), "0", 0, "B", false, 0, "") + r.pdf.CellFormat(35, 4, uni(tracking.Ticket), "0", 0, "B", false, 0, "") + r.pdf.Ln(-1) + r.pdfColorBlack() + r.pdf.CellFormat(10, 4, "", "0", 0, "", false, 0, "") + r.pdf.MultiCell(170, 4, uni(justificationStr), "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + } else { + r.pdf.Ln(-1) + } + r.pdfColorBlack() +} + +func (r *pdfReporter) createTechnicalAssets(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + // category title + title := "Identified Risks by Technical Asset" + r.pdfColorBlack() + r.addHeadline(title, false) + r.defineLinkTarget("{intro-risks-by-technical-asset}") + html := r.pdf.HTMLBasicNew() + var text strings.Builder + text.WriteString("In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " + + "of which " + + "" + strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel))) + " are rated as critical, " + + "" + strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel))) + " as high, " + + "" + strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel))) + " as elevated, " + + "" + strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel))) + " as medium, " + + "and " + strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel))) + " as low. " + + "

These risks are distributed across " + strconv.Itoa(len(parsedModel.InScopeTechnicalAssets())) + " in-scope technical assets. ") + text.WriteString("The following sub-chapters of this section describe each identified risk grouped by technical asset. ") // TODO more explanation text + text.WriteString("The RAA value of a technical asset is the calculated \"Relative Attacker Attractiveness\" value in percent.") + html.Write(5, text.String()) + text.Reset() + r.currentChapterTitleBreadcrumb = title + for _, technicalAsset := range sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel) { + risksStr := technicalAsset.GeneratedRisks(parsedModel) + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk" + if len(risksStr) != 1 { + suffix += "s" + } + if technicalAsset.OutOfScope { + r.pdfColorOutOfScope() + suffix = "out-of-scope" + } else { + switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + case types.HighSeverity: + ColorHighRisk(r.pdf) + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + case types.LowSeverity: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 { + r.pdfColorBlack() + } + } + + // asset title + title := technicalAsset.Title + ": " + suffix + r.addHeadline(uni(title), true) + r.pdfColorBlack() + r.defineLinkTarget("{" + technicalAsset.Id + "}") + r.currentChapterTitleBreadcrumb = title + + // asset description + html := r.pdf.HTMLBasicNew() + var text strings.Builder + text.WriteString("Description

") + text.WriteString(uni(technicalAsset.Description)) + html.Write(5, text.String()) + text.Reset() + r.pdf.SetTextColor(0, 0, 0) + + // and more metadata of asset in tabular view + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.Ln(-1) + if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdfColorBlack() + r.pdf.CellFormat(190, 6, "Identified Risks of Asset", "0", 0, "", false, 0, "") + r.pdfColorGray() + oldLeft, _, _, _ := r.pdf.GetMargins() + if len(risksStr) > 0 { + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(15) + /* + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(185, 6, strconv.Itoa(len(risksStr))+" risksStr in total were identified", "0", 0, "", false, 0, "") + */ + headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false + r.pdf.Ln(-1) + for _, risk := range risksStr { + text.WriteString("
") + html.Write(5, text.String()) + text.Reset() + if r.pdf.GetY() > 250 { // 250 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + switch risk.Severity { + case types.CriticalSeverity: + ColorCriticalRisk(r.pdf) + if !headlineCriticalWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft + 3) + html.Write(5, "
Critical Risk Severity

") + headlineCriticalWritten = true + } + case types.HighSeverity: + ColorHighRisk(r.pdf) + if !headlineHighWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft + 3) + html.Write(5, "
High Risk Severity

") + headlineHighWritten = true + } + case types.ElevatedSeverity: + ColorElevatedRisk(r.pdf) + if !headlineElevatedWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft + 3) + html.Write(5, "
Elevated Risk Severity

") + headlineElevatedWritten = true + } + case types.MediumSeverity: + ColorMediumRisk(r.pdf) + if !headlineMediumWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft + 3) + html.Write(5, "
Medium Risk Severity

") + headlineMediumWritten = true + } + case types.LowSeverity: + ColorLowRisk(r.pdf) + if !headlineLowWritten { + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(oldLeft + 3) + html.Write(5, "
Low Risk Severity

") + headlineLowWritten = true + } + default: + r.pdfColorBlack() + } + if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + r.pdfColorBlack() + } + posY := r.pdf.GetY() + r.pdf.SetLeftMargin(oldLeft + 10) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.") + text.WriteString("
") + html.Write(5, text.String()) + text.Reset() + + r.pdf.SetFont("Helvetica", "", fontSizeVerySmall) + r.pdfColorGray() + r.pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false) + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.CategoryId]) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.writeRiskTrackingStatus(parsedModel, risk) + r.pdf.SetLeftMargin(oldLeft) + } + } else { + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetLeftMargin(15) + text := "No risksStr were identified." + if technicalAsset.OutOfScope { + text = "Asset was defined as out-of-scope." + } + html.Write(5, text) + r.pdf.Ln(-1) + } + r.pdf.SetLeftMargin(oldLeft) + + r.pdf.Ln(-1) + r.pdf.Ln(4) + if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.CellFormat(190, 6, "Asset Information", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Id, "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Type.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Usage.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + 
r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "RAA:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + textRAA := fmt.Sprintf("%.0f", technicalAsset.RAA) + " %" + if technicalAsset.OutOfScope { + r.pdfColorGray() + textRAA = "out-of-scope" + } + r.pdf.MultiCell(145, 6, textRAA, "0", "0", false) + r.pdfColorBlack() + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Size:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Size.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Technology:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Technology.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + tagsUsedText := "" + sorted := technicalAsset.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if len(tagsUsedText) > 0 { + tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Internet:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Internet), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Machine:", "0", 0, 
"", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Machine.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Encryption:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, technicalAsset.Encryption.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Multi-Tenant:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.MultiTenant), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Redundant:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Redundant), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Custom-Developed:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.CustomDevelopedParts), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Client by Human:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.UsedAsClientByHuman), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Data Processed:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsProcessedText := "" + for _, dataAsset := range technicalAsset.DataAssetsProcessedSorted(parsedModel) { + if 
len(dataAssetsProcessedText) > 0 { + dataAssetsProcessedText += ", " + } + dataAssetsProcessedText += dataAsset.Title + } + if len(dataAssetsProcessedText) == 0 { + r.pdfColorGray() + dataAssetsProcessedText = "none" + } + r.pdf.MultiCell(145, 6, uni(dataAssetsProcessedText), "0", "0", false) + + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Data Stored:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsStoredText := "" + for _, dataAsset := range technicalAsset.DataAssetsStoredSorted(parsedModel) { + if len(dataAssetsStoredText) > 0 { + dataAssetsStoredText += ", " + } + dataAssetsStoredText += dataAsset.Title + } + if len(dataAssetsStoredText) == 0 { + r.pdfColorGray() + dataAssetsStoredText = "none" + } + r.pdf.MultiCell(145, 6, uni(dataAssetsStoredText), "0", "0", false) + + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Formats Accepted:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + formatsAcceptedText := "" + for _, formatAccepted := range technicalAsset.DataFormatsAcceptedSorted() { + if len(formatsAcceptedText) > 0 { + formatsAcceptedText += ", " + } + formatsAcceptedText += formatAccepted.Title() + } + if len(formatsAcceptedText) == 0 { + r.pdfColorGray() + formatsAcceptedText = "none of the special data formats accepted" + } + r.pdf.MultiCell(145, 6, formatsAcceptedText, "0", "0", false) + + r.pdf.Ln(-1) + r.pdf.Ln(4) + if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.CellFormat(190, 6, "Asset Rating", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + 
r.pdf.MultiCell(145, 6, uni(technicalAsset.Owner), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, technicalAsset.Confidentiality.String(), "0", 0, "", false, 0, "") + r.pdfColorGray() + r.pdf.CellFormat(115, 6, technicalAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, technicalAsset.Integrity.String(), "0", 0, "", false, 0, "") + r.pdfColorGray() + r.pdf.CellFormat(115, 6, technicalAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, technicalAsset.Availability.String(), "0", 0, "", false, 0, "") + r.pdfColorGray() + r.pdf.CellFormat(115, 6, technicalAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, uni(technicalAsset.JustificationCiaRating), "0", "0", false) + + if technicalAsset.OutOfScope { + r.pdf.Ln(-1) + r.pdf.Ln(4) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", 
fontSizeBody) + r.pdf.CellFormat(190, 6, "Asset Out-of-Scope Justification", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.MultiCell(190, 6, uni(technicalAsset.JustificationOutOfScope), "0", "0", false) + r.pdf.Ln(-1) + } + r.pdf.Ln(-1) + + if len(technicalAsset.CommunicationLinks) > 0 { + r.pdf.Ln(-1) + if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.CellFormat(190, 6, "Outgoing Communication Links: "+strconv.Itoa(len(technicalAsset.CommunicationLinks)), "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Target technical asset names are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + for _, outgoingCommLink := range technicalAsset.CommunicationLinksSorted() { + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(185, 6, uni(outgoingCommLink.Title)+" (outgoing)", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.MultiCell(185, 6, uni(outgoingCommLink.Description), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Target:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(125, 6, uni(parsedModel.TechnicalAssets[outgoingCommLink.TargetId].Title), "0", "0", false) + r.pdf.Link(60, r.pdf.GetY()-5, 70, 5, r.tocLinkIdByAssetId[outgoingCommLink.TargetId]) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + 
r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, outgoingCommLink.Protocol.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Protocol.IsEncrypted()), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, outgoingCommLink.Authentication.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, outgoingCommLink.Authorization.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Readonly), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, outgoingCommLink.Usage.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "") 
+ r.pdfColorBlack() + tagsUsedText := "" + sorted := outgoingCommLink.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if len(tagsUsedText) > 0 { + tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.VPN), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.IpFiltered), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsSentText := "" + for _, dataAsset := range outgoingCommLink.DataAssetsSentSorted(parsedModel) { + if len(dataAssetsSentText) > 0 { + dataAssetsSentText += ", " + } + dataAssetsSentText += dataAsset.Title + } + if len(dataAssetsSentText) == 0 { + r.pdfColorGray() + dataAssetsSentText = "none" + } + r.pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsReceivedText := "" + for _, dataAsset := range outgoingCommLink.DataAssetsReceivedSorted(parsedModel) { + if len(dataAssetsReceivedText) > 0 { + dataAssetsReceivedText += ", " + } + dataAssetsReceivedText += dataAsset.Title + } + if len(dataAssetsReceivedText) == 0 { + r.pdfColorGray() + dataAssetsReceivedText = 
"none" + } + r.pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false) + r.pdf.Ln(-1) + } + } + + incomingCommLinks := parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + if len(incomingCommLinks) > 0 { + r.pdf.Ln(-1) + if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.CellFormat(190, 6, "Incoming Communication Links: "+strconv.Itoa(len(incomingCommLinks)), "0", 0, "", false, 0, "") + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Source technical asset names are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.Ln(-1) + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + for _, incomingCommLink := range incomingCommLinks { + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorBlack() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(185, 6, uni(incomingCommLink.Title)+" (incoming)", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.MultiCell(185, 6, uni(incomingCommLink.Description), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdf.Ln(-1) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Source:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, uni(parsedModel.TechnicalAssets[incomingCommLink.SourceId].Title), "0", "0", false) + r.pdf.Link(60, r.pdf.GetY()-5, 70, 5, r.tocLinkIdByAssetId[incomingCommLink.SourceId]) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "") + 
r.pdfColorBlack() + r.pdf.MultiCell(140, 6, incomingCommLink.Protocol.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Protocol.IsEncrypted()), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, incomingCommLink.Authentication.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, incomingCommLink.Authorization.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Readonly), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, incomingCommLink.Usage.String(), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + tagsUsedText := "" + sorted := incomingCommLink.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if 
len(tagsUsedText) > 0 { + tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.VPN), "0", "0", false) + if r.pdf.GetY() > 270 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.IpFiltered), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsSentText := "" + // yep, here we reverse the sent/received direction, as it's the incoming stuff + for _, dataAsset := range incomingCommLink.DataAssetsSentSorted(parsedModel) { + if len(dataAssetsSentText) > 0 { + dataAssetsSentText += ", " + } + dataAssetsSentText += dataAsset.Title + } + if len(dataAssetsSentText) == 0 { + r.pdfColorGray() + dataAssetsSentText = "none" + } + r.pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + dataAssetsReceivedText := "" + // yep, here we reverse the sent/received direction, as it's the incoming stuff + for _, dataAsset := range incomingCommLink.DataAssetsReceivedSorted(parsedModel) { + if len(dataAssetsReceivedText) > 0 { + dataAssetsReceivedText += ", " + } + dataAssetsReceivedText += dataAsset.Title + } + if len(dataAssetsReceivedText) == 0 { + 
r.pdfColorGray() + dataAssetsReceivedText = "none" + } + r.pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false) + r.pdf.Ln(-1) + } + } + } +} + +func (r *pdfReporter) createDataAssets(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + title := "Identified Data Breach Probabilities by Data Asset" + r.pdfColorBlack() + r.addHeadline(title, false) + r.defineLinkTarget("{intro-risks-by-data-asset}") + html := r.pdf.HTMLBasicNew() + html.Write(5, "In total "+strconv.Itoa(types.TotalRiskCount(parsedModel))+" potential risks have been identified during the threat modeling process "+ + "of which "+ + ""+strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel)))+" are rated as critical, "+ + ""+strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel)))+" as high, "+ + ""+strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel)))+" as elevated, "+ + ""+strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel)))+" as medium, "+ + "and "+strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel)))+" as low. "+ + "

These risks are distributed across "+strconv.Itoa(len(parsedModel.DataAssets))+" data assets. ") + html.Write(5, "The following sub-chapters of this section describe the derived data breach probabilities grouped by data asset.
") // TODO more explanation text + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdfColorGray() + html.Write(5, "Technical asset names and risk IDs are clickable and link to the corresponding chapter.") + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.currentChapterTitleBreadcrumb = title + for _, dataAsset := range sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel) { + if r.pdf.GetY() > 280 { // 280 as only small font previously (not 250) + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + r.pdfColorBlack() + switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) { + case types.Probable: + ColorHighRisk(r.pdf) + case types.Possible: + ColorMediumRisk(r.pdf) + case types.Improbable: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) { + r.pdfColorBlack() + } + risksStr := dataAsset.IdentifiedDataBreachProbabilityRisks(parsedModel) + countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) + suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk" + if len(risksStr) != 1 { + suffix += "s" + } + title := uni(dataAsset.Title) + ": " + suffix + r.addHeadline(title, true) + r.defineLinkTarget("{data:" + dataAsset.Id + "}") + r.pdfColorBlack() + html.Write(5, uni(dataAsset.Description)) + html.Write(5, "

") + + r.pdf.SetFont("Helvetica", "", fontSizeBody) + /* + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Indirect Breach:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + probability := dataAsset.IdentifiedDataBreachProbability() + dataBreachText := probability.String() + switch probability { + case model.Probable: + ColorHighRisk(r.pdf) + case model.Possible: + ColorMediumRisk(r.pdf) + case model.Improbable: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if !dataAsset.IsDataBreachPotentialStillAtRisk() { + r.pdfColorBlack() + dataBreachText = "none" + } + r.pdf.MultiCell(145, 6, dataBreachText, "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + */ + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, dataAsset.Id, "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, dataAsset.Usage.String(), "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Quantity:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, dataAsset.Quantity.String(), "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + tagsUsedText := "" + sorted := dataAsset.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if len(tagsUsedText) > 0 { + 
tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Origin:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, uni(dataAsset.Origin), "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, uni(dataAsset.Owner), "0", "0", false) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, dataAsset.Confidentiality.String(), "0", 0, "", false, 0, "") + r.pdfColorGray() + r.pdf.CellFormat(115, 6, dataAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, dataAsset.Integrity.String(), "0", 0, "", false, 0, "") + r.pdfColorGray() + r.pdf.CellFormat(115, 6, dataAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.CellFormat(40, 6, dataAsset.Availability.String(), "0", 0, "", false, 0, "") 
+ r.pdfColorGray() + r.pdf.CellFormat(115, 6, dataAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.Ln(-1) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, uni(dataAsset.JustificationCiaRating), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Processed by:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + processedByText := "" + for _, dataAsset := range dataAsset.ProcessedByTechnicalAssetsSorted(parsedModel) { + if len(processedByText) > 0 { + processedByText += ", " + } + processedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back + } + if len(processedByText) == 0 { + r.pdfColorGray() + processedByText = "none" + } + r.pdf.MultiCell(145, 6, uni(processedByText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Stored by:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + storedByText := "" + for _, dataAsset := range dataAsset.StoredByTechnicalAssetsSorted(parsedModel) { + if len(storedByText) > 0 { + storedByText += ", " + } + storedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back + } + if len(storedByText) == 0 { + r.pdfColorGray() + storedByText = "none" + } + r.pdf.MultiCell(145, 6, uni(storedByText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Sent via:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + sentViaText := "" + for _, commLink 
:= range dataAsset.SentViaCommLinksSorted(parsedModel) { + if len(sentViaText) > 0 { + sentViaText += ", " + } + sentViaText += commLink.Title // TODO add link to technical asset detail chapter and back + } + if len(sentViaText) == 0 { + r.pdfColorGray() + sentViaText = "none" + } + r.pdf.MultiCell(145, 6, uni(sentViaText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Received via:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + receivedViaText := "" + for _, commLink := range dataAsset.ReceivedViaCommLinksSorted(parsedModel) { + if len(receivedViaText) > 0 { + receivedViaText += ", " + } + receivedViaText += commLink.Title // TODO add link to technical asset detail chapter and back + } + if len(receivedViaText) == 0 { + r.pdfColorGray() + receivedViaText = "none" + } + r.pdf.MultiCell(145, 6, uni(receivedViaText), "0", "0", false) + + /* + // where is this data asset at risk (i.e. 
why) + risksByTechAssetId := dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId() + techAssetsResponsible := make([]model.TechnicalAsset, 0) + for techAssetId, _ := range risksByTechAssetId { + techAssetsResponsible = append(techAssetsResponsible, parsedModel.TechnicalAssets[techAssetId]) + } + sort.Sort(model.ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk(techAssetsResponsible)) + assetStr := "assets" + if len(techAssetsResponsible) == 1 { + assetStr = "asset" + } + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Risk via:", "0", 0, "", false, 0, "") + if len(techAssetsResponsible) == 0 { + r.pdfColorGray() + r.pdf.MultiCell(145, 6, "This data asset is not directly at risk via any technical asset.", "0", "0", false) + } else { + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, "This data asset is at direct risk via "+strconv.Itoa(len(techAssetsResponsible))+" technical "+assetStr+":", "0", "0", false) + for _, techAssetResponsible := range techAssetsResponsible { + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + switch model.HighestSeverityStillAtRisk(techAssetResponsible.GeneratedRisks()) { + case model.High: + ColorHighRisk(r.pdf) + case model.Medium: + ColorMediumRisk(r.pdf) + case model.Low: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + risksStr := techAssetResponsible.GeneratedRisks() + if len(model.ReduceToOnlyStillAtRisk(risksStr)) == 0 { + r.pdfColorBlack() + } + riskStr := "risksStr" + if len(risksStr) == 1 { + riskStr = "risk" + } + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + posY := r.pdf.GetY() + risksResponsible := techAssetResponsible.GeneratedRisks() + risksResponsibleStillAtRisk := model.ReduceToOnlyStillAtRisk(risksResponsible) + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdf.MultiCell(185, 6, uni(techAssetResponsible.Title)+": "+strconv.Itoa(len(risksResponsibleStillAtRisk))+" / 
"+strconv.Itoa(len(risksResponsible))+" "+riskStr, "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, tocLinkIdByAssetId[techAssetResponsible.Id]) + } + r.pdfColorBlack() + } + */ + + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Data Breach:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + dataBreachProbability := dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) + riskText := dataBreachProbability.String() + switch dataBreachProbability { + case types.Probable: + ColorHighRisk(r.pdf) + case types.Possible: + ColorMediumRisk(r.pdf) + case types.Improbable: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) { + r.pdfColorBlack() + riskText = "none" + } + r.pdf.MultiCell(145, 6, riskText, "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + + // how can is this data asset be indirectly lost (i.e. 
why) + dataBreachRisksStillAtRisk := dataAsset.IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel) + types.SortByDataBreachProbability(dataBreachRisksStillAtRisk, parsedModel) + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Data Breach Risks:", "0", 0, "", false, 0, "") + if len(dataBreachRisksStillAtRisk) == 0 { + r.pdfColorGray() + r.pdf.MultiCell(145, 6, "This data asset has no data breach potential.", "0", "0", false) + } else { + r.pdfColorBlack() + riskRemainingStr := "risksStr" + if countStillAtRisk == 1 { + riskRemainingStr = "risk" + } + r.pdf.MultiCell(145, 6, "This data asset has data breach potential because of "+ + ""+strconv.Itoa(countStillAtRisk)+" remaining "+riskRemainingStr+":", "0", "0", false) + for _, dataBreachRisk := range dataBreachRisksStillAtRisk { + if r.pdf.GetY() > 280 { // 280 as only small font here + r.pageBreak() + r.pdf.SetY(36) + } + switch dataBreachRisk.DataBreachProbability { + case types.Probable: + ColorHighRisk(r.pdf) + case types.Possible: + ColorMediumRisk(r.pdf) + case types.Improbable: + ColorLowRisk(r.pdf) + default: + r.pdfColorBlack() + } + if !dataBreachRisk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + r.pdfColorBlack() + } + r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") + posY := r.pdf.GetY() + r.pdf.SetFont("Helvetica", "", fontSizeVerySmall) + r.pdf.MultiCell(185, 5, dataBreachRisk.DataBreachProbability.Title()+": "+uni(dataBreachRisk.SyntheticId), "0", "0", false) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[dataBreachRisk.CategoryId]) + } + r.pdfColorBlack() + } + } +} + +func (r *pdfReporter) createTrustBoundaries(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + title := "Trust Boundaries" + r.pdfColorBlack() + r.addHeadline(title, false) + + 
html := r.pdf.HTMLBasicNew() + word := "has" + if len(parsedModel.TrustBoundaries) > 1 { + word = "have" + } + html.Write(5, "In total "+strconv.Itoa(len(parsedModel.TrustBoundaries))+" trust boundaries "+word+" been "+ + "modeled during the threat modeling process.") + r.currentChapterTitleBreadcrumb = title + for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) { + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + ColorTwilight(r.pdf) + if !trustBoundary.Type.IsNetworkBoundary() { + r.pdfColorLightGray() + } + html.Write(5, ""+uni(trustBoundary.Title)+"
") + r.defineLinkTarget("{boundary:" + trustBoundary.Id + "}") + html.Write(5, uni(trustBoundary.Description)) + html.Write(5, "

") + + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, trustBoundary.Id, "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "") + ColorTwilight(r.pdf) + if !trustBoundary.Type.IsNetworkBoundary() { + r.pdfColorLightGray() + } + r.pdf.MultiCell(145, 6, trustBoundary.Type.String(), "0", "0", false) + r.pdfColorBlack() + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + tagsUsedText := "" + sorted := trustBoundary.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if len(tagsUsedText) > 0 { + tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Assets inside:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + assetsInsideText := "" + for _, assetKey := range trustBoundary.TechnicalAssetsInside { + if len(assetsInsideText) > 0 { + assetsInsideText += ", " + } + assetsInsideText += parsedModel.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back + } + if len(assetsInsideText) == 0 { + r.pdfColorGray() + assetsInsideText = "none" + } + r.pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", 
false, 0, "") + r.pdf.CellFormat(40, 6, "Boundaries nested:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + boundariesNestedText := "" + for _, assetKey := range trustBoundary.TrustBoundariesNested { + if len(boundariesNestedText) > 0 { + boundariesNestedText += ", " + } + boundariesNestedText += parsedModel.TrustBoundaries[assetKey].Title + } + if len(boundariesNestedText) == 0 { + r.pdfColorGray() + boundariesNestedText = "none" + } + r.pdf.MultiCell(145, 6, uni(boundariesNestedText), "0", "0", false) + } +} + +func questionsUnanswered(parsedModel *types.ParsedModel) int { + result := 0 + for _, answer := range parsedModel.Questions { + if len(strings.TrimSpace(answer)) == 0 { + result++ + } + } + return result +} + +func (r *pdfReporter) createSharedRuntimes(parsedModel *types.ParsedModel) { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + title := "Shared Runtimes" + r.pdfColorBlack() + r.addHeadline(title, false) + + html := r.pdf.HTMLBasicNew() + word, runtime := "has", "runtime" + if len(parsedModel.SharedRuntimes) > 1 { + word, runtime = "have", "runtimes" + } + html.Write(5, "In total "+strconv.Itoa(len(parsedModel.SharedRuntimes))+" shared "+runtime+" "+word+" been "+ + "modeled during the threat modeling process.") + r.currentChapterTitleBreadcrumb = title + for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) { + r.pdfColorBlack() + if r.pdf.GetY() > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + html.Write(5, "


") + } + html.Write(5, ""+uni(sharedRuntime.Title)+"
") + r.defineLinkTarget("{runtime:" + sharedRuntime.Id + "}") + html.Write(5, uni(sharedRuntime.Description)) + html.Write(5, "

") + + r.pdf.SetFont("Helvetica", "", fontSizeBody) + + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(145, 6, sharedRuntime.Id, "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + tagsUsedText := "" + sorted := sharedRuntime.Tags + sort.Strings(sorted) + for _, tag := range sorted { + if len(tagsUsedText) > 0 { + tagsUsedText += ", " + } + tagsUsedText += tag + } + if len(tagsUsedText) == 0 { + r.pdfColorGray() + tagsUsedText = "none" + } + r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) + + if r.pdf.GetY() > 265 { + r.pageBreak() + r.pdf.SetY(36) + } + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(40, 6, "Assets running:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + assetsInsideText := "" + for _, assetKey := range sharedRuntime.TechnicalAssetsRunning { + if len(assetsInsideText) > 0 { + assetsInsideText += ", " + } + assetsInsideText += parsedModel.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back + } + if len(assetsInsideText) == 0 { + r.pdfColorGray() + assetsInsideText = "none" + } + r.pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false) + } +} + +func (r *pdfReporter) createRiskRulesChecked(parsedModel *types.ParsedModel, modelFilename string, skipRiskRules string, buildTimestamp string, modelHash string, customRiskRules map[string]*model.CustomRisk) { + r.pdf.SetTextColor(0, 0, 0) + title := "Risk Rules Checked by Threagile" + r.addHeadline(title, false) + r.defineLinkTarget("{risk-rules-checked}") + r.currentChapterTitleBreadcrumb = title + + html := r.pdf.HTMLBasicNew() + var strBuilder strings.Builder + r.pdfColorGray() + 
r.pdf.SetFont("Helvetica", "", fontSizeSmall) + timestamp := time.Now() + strBuilder.WriteString("Threagile Version: " + docs.ThreagileVersion) + strBuilder.WriteString("
Threagile Build Timestamp: " + buildTimestamp) + strBuilder.WriteString("
Threagile Execution Timestamp: " + timestamp.Format("20060102150405")) + strBuilder.WriteString("
Model Filename: " + modelFilename) + strBuilder.WriteString("
Model Hash (SHA256): " + modelHash) + html.Write(5, strBuilder.String()) + strBuilder.Reset() + r.pdfColorBlack() + r.pdf.SetFont("Helvetica", "", fontSizeBody) + strBuilder.WriteString("

Threagile (see https://threagile.io for more details) is an open-source toolkit for agile threat modeling, created by Christian Schneider (https://christian-schneider.net): It allows to model an architecture with its assets in an agile fashion as a YAML file " + + "directly inside the IDE. Upon execution of the Threagile toolkit all standard risk rules (as well as individual custom rules if present) " + + "are checked against the architecture model. At the time the Threagile toolkit was executed on the model input file " + + "the following risk rules were checked:") + html.Write(5, strBuilder.String()) + strBuilder.Reset() + + // TODO use the new run system to discover risk rules instead of hard-coding them here: + skippedRules := strings.Split(skipRiskRules, ",") + skipped := "" + r.pdf.Ln(-1) + + for id, customRule := range customRiskRules { + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + if contains(skippedRules, id) { + skipped = "SKIPPED - " + } else { + skipped = "" + } + r.pdf.CellFormat(190, 3, skipped+customRule.Category.Title, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdf.CellFormat(190, 6, id, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "I", fontSizeBody) + r.pdf.CellFormat(190, 6, "Custom Risk Rule", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, customRule.Category.STRIDE.Title(), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, firstParagraph(customRule.Category.Description), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + 
r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, customRule.Category.DetectionLogic, "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, customRule.Category.RiskAssessment, "0", "0", false) + } + + for _, key := range sortedKeysOfIndividualRiskCategories(parsedModel) { + individualRiskCategory := parsedModel.IndividualRiskCategories[key] + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + r.pdf.CellFormat(190, 3, individualRiskCategory.Title, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdf.CellFormat(190, 6, individualRiskCategory.Id, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "I", fontSizeBody) + r.pdf.CellFormat(190, 6, "Individual Risk Category", "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, individualRiskCategory.STRIDE.Title(), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, firstParagraph(individualRiskCategory.Description), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, individualRiskCategory.DetectionLogic, "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, 
individualRiskCategory.RiskAssessment, "0", "0", false) + } + + for _, rule := range risks.GetBuiltInRiskRules() { + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "B", fontSizeBody) + if contains(skippedRules, rule.Category().Id) { + skipped = "SKIPPED - " + } else { + skipped = "" + } + r.pdf.CellFormat(190, 3, skipped+rule.Category().Title, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeSmall) + r.pdf.CellFormat(190, 6, rule.Category().Id, "0", 0, "", false, 0, "") + r.pdf.Ln(-1) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, rule.Category().STRIDE.Title(), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, firstParagraph(rule.Category().Description), "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, rule.Category().DetectionLogic, "0", "0", false) + r.pdfColorGray() + r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") + r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") + r.pdfColorBlack() + r.pdf.MultiCell(160, 6, rule.Category().RiskAssessment, "0", "0", false) + } +} + +func (r *pdfReporter) createTargetDescription(parsedModel *types.ParsedModel, baseFolder string) error { + uni := r.pdf.UnicodeTranslatorFromDescriptor("") + r.pdf.SetTextColor(0, 0, 0) + title := "Application Overview" + r.addHeadline(title, false) + r.defineLinkTarget("{target-overview}") + r.currentChapterTitleBreadcrumb = title + + var intro strings.Builder + html := r.pdf.HTMLBasicNew() + + intro.WriteString("Business Criticality

") + intro.WriteString("The overall business criticality of \"" + uni(parsedModel.Title) + "\" was rated as:

") + html.Write(5, intro.String()) + criticality := parsedModel.BusinessCriticality + intro.Reset() + r.pdfColorGray() + intro.WriteString("( ") + if criticality == types.Archive { + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + intro.WriteString("" + strings.ToUpper(types.Archive.String()) + "") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorGray() + } else { + intro.WriteString(types.Archive.String()) + } + intro.WriteString(" | ") + if criticality == types.Operational { + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + intro.WriteString("" + strings.ToUpper(types.Operational.String()) + "") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorGray() + } else { + intro.WriteString(types.Operational.String()) + } + intro.WriteString(" | ") + if criticality == types.Important { + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + intro.WriteString("" + strings.ToUpper(types.Important.String()) + "") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorGray() + } else { + intro.WriteString(types.Important.String()) + } + intro.WriteString(" | ") + if criticality == types.Critical { + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + intro.WriteString("" + strings.ToUpper(types.Critical.String()) + "") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorGray() + } else { + intro.WriteString(types.Critical.String()) + } + intro.WriteString(" | ") + if criticality == types.MissionCritical { + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + intro.WriteString("" + strings.ToUpper(types.MissionCritical.String()) + "") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorGray() + } else { + intro.WriteString(types.MissionCritical.String()) + } + intro.WriteString(" )") + html.Write(5, intro.String()) + intro.Reset() + r.pdfColorBlack() + + intro.WriteString("


Business Overview

") + intro.WriteString(uni(parsedModel.BusinessOverview.Description)) + html.Write(5, intro.String()) + intro.Reset() + err := r.addCustomImages(parsedModel.BusinessOverview.Images, baseFolder, html) + if err != nil { + return fmt.Errorf("error adding custom images: %w", err) + } + + intro.WriteString("


Technical Overview

") + intro.WriteString(uni(parsedModel.TechnicalOverview.Description)) + html.Write(5, intro.String()) + intro.Reset() + err = r.addCustomImages(parsedModel.TechnicalOverview.Images, baseFolder, html) + if err != nil { + return fmt.Errorf("error adding custom images: %w", err) + } + return nil +} + +func (r *pdfReporter) addCustomImages(customImages []map[string]string, baseFolder string, html gofpdf.HTMLBasicType) error { + var text strings.Builder + for _, customImage := range customImages { + for imageFilename := range customImage { + imageFilenameWithoutPath := filepath.Base(imageFilename) + // check JPEG, PNG or GIF + extension := strings.ToLower(filepath.Ext(imageFilenameWithoutPath)) + if extension == ".jpeg" || extension == ".jpg" || extension == ".png" || extension == ".gif" { + imageFullFilename := filepath.Join(baseFolder, imageFilenameWithoutPath) + heightWhenWidthIsFix, err := getHeightWhenWidthIsFix(imageFullFilename, 180) + if err != nil { + return fmt.Errorf("error getting height of image file: %w", err) + } + if r.pdf.GetY()+heightWhenWidthIsFix > 250 { + r.pageBreak() + r.pdf.SetY(36) + } else { + text.WriteString("

") + } + text.WriteString(customImage[imageFilename] + ":

") + html.Write(5, text.String()) + text.Reset() + + var options gofpdf.ImageOptions + options.ImageType = "" + r.pdf.RegisterImage(imageFullFilename, "") + r.pdf.ImageOptions(imageFullFilename, 15, r.pdf.GetY()+50, 170, 0, true, options, 0, "") + } else { + log.Print("Ignoring custom image file: ", imageFilenameWithoutPath) + } + } + } + return nil +} + +// fileExists checks if a file exists and is not a directory before we +// try using it to prevent further errors. +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +func getHeightWhenWidthIsFix(imageFullFilename string, width float64) (float64, error) { + if !fileExists(imageFullFilename) { + return 0, fmt.Errorf("image file does not exist (or is not readable as file): %s", filepath.Base(imageFullFilename)) + } + /* #nosec imageFullFilename is not tainted (see caller restricting it to image files of model folder only) */ + file, err := os.Open(imageFullFilename) + defer func() { _ = file.Close() }() + if err != nil { + return 0, fmt.Errorf("error opening image file: %w", err) + } + img, _, err := image.DecodeConfig(file) + if err != nil { + return 0, fmt.Errorf("error decoding image file: %w", err) + } + return float64(img.Height) / (float64(img.Width) / width), nil +} + +func (r *pdfReporter) embedDataFlowDiagram(diagramFilenamePNG string, tempFolder string) { + r.pdf.SetTextColor(0, 0, 0) + title := "Data-Flow Diagram" + r.addHeadline(title, false) + r.defineLinkTarget("{data-flow-diagram}") + r.currentChapterTitleBreadcrumb = title + + var intro strings.Builder + intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " + + "overview of the data-flow between technical assets. " + + "The RAA value is the calculated Relative Attacker Attractiveness in percent. 
" + + "For a full high-resolution version of this diagram please refer to the PNG image file alongside this report.") + + html := r.pdf.HTMLBasicNew() + html.Write(5, intro.String()) + + // check to rotate the image if it is wider than high + /* #nosec diagramFilenamePNG is not tainted */ + imagePath, _ := os.Open(diagramFilenamePNG) + defer func() { _ = imagePath.Close() }() + srcImage, _, _ := image.Decode(imagePath) + srcDimensions := srcImage.Bounds() + // wider than high? + muchWiderThanHigh := srcDimensions.Dx() > int(float64(srcDimensions.Dy())*1.25) + // fresh page (eventually landscape)? + r.isLandscapePage = false + _ = tempFolder + /* + pinnedWidth, pinnedHeight := 190.0, 210.0 + if dataFlowDiagramFullscreen { + pinnedHeight = 235.0 + if muchWiderThanHigh { + if allowedPdfLandscapePages { + pinnedWidth = 275.0 + isLandscapePage = true + r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4")) + } else { + // so rotate the image left by 90 degrees + // ok, use temp PNG then + // now rotate left by 90 degrees + rotatedFile, err := os.CreateTemp(tempFolder, "diagram-*-.png") + checkErr(err) + defer os.Remove(rotatedFile.Name()) + dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx())) + err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0}) + checkErr(err) + newImage, _ := os.Create(rotatedFile.Name()) + defer newImage.Close() + err = png.Encode(newImage, dstImage) + checkErr(err) + diagramFilenamePNG = rotatedFile.Name() + } + } else { + r.pdf.AddPage() + } + } else { + r.pdf.Ln(10) + }*/ + // embed in PDF + var options gofpdf.ImageOptions + options.ImageType = "" + r.pdf.RegisterImage(diagramFilenamePNG, "") + var maxWidth, maxHeight, newWidth int + var embedWidth, embedHeight float64 + if allowedPdfLandscapePages && muchWiderThanHigh { + maxWidth, maxHeight = 275, 150 + r.isLandscapePage = true + r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4")) + } else { + r.pdf.Ln(10) + maxWidth, maxHeight = 
190, 200 // reduced height as a text paragraph is above + } + newWidth = srcDimensions.Dx() / (srcDimensions.Dy() / maxHeight) + if newWidth <= maxWidth { + embedWidth, embedHeight = 0, float64(maxHeight) + } else { + embedWidth, embedHeight = float64(maxWidth), 0 + } + r.pdf.ImageOptions(diagramFilenamePNG, 10, r.pdf.GetY(), embedWidth, embedHeight, true, options, 0, "") + r.isLandscapePage = false + + // add diagram legend page + if embedDiagramLegendPage { + r.pdf.AddPage() + gofpdi.UseImportedTemplate(r.pdf, r.diagramLegendTemplateId, 0, 0, 0, 300) + } +} + +func sortedKeysOfIndividualRiskCategories(parsedModel *types.ParsedModel) []string { + keys := make([]string, 0) + for k := range parsedModel.IndividualRiskCategories { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (r *pdfReporter) embedDataRiskMapping(diagramFilenamePNG string, tempFolder string) { + r.pdf.SetTextColor(0, 0, 0) + title := "Data Mapping" + r.addHeadline(title, false) + r.defineLinkTarget("{data-risk-mapping}") + r.currentChapterTitleBreadcrumb = title + + var intro strings.Builder + intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " + + "distribution of data assets across technical assets. The color matches the identified data breach probability and risk level " + + "(see the \"Data Breach Probabilities\" chapter for more details). " + + "A solid line stands for data is stored by the asset and a dashed one means " + + "data is processed by the asset. 
For a full high-resolution version of this diagram please refer to the PNG image " + + "file alongside this report.") + + html := r.pdf.HTMLBasicNew() + html.Write(5, intro.String()) + + // TODO dedupe with code from other diagram embedding (almost same code) + // check to rotate the image if it is wider than high + /* #nosec diagramFilenamePNG is not tainted */ + imagePath, _ := os.Open(diagramFilenamePNG) + defer func() { _ = imagePath.Close() }() + srcImage, _, _ := image.Decode(imagePath) + srcDimensions := srcImage.Bounds() + // wider than high? + widerThanHigh := srcDimensions.Dx() > srcDimensions.Dy() + pinnedWidth, pinnedHeight := 190.0, 195.0 + // fresh page (eventually landscape)? + r.isLandscapePage = false + _ = tempFolder + /* + if dataFlowDiagramFullscreen { + pinnedHeight = 235.0 + if widerThanHigh { + if allowedPdfLandscapePages { + pinnedWidth = 275.0 + isLandscapePage = true + r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4")) + } else { + // so rotate the image left by 90 degrees + // ok, use temp PNG then + // now rotate left by 90 degrees + rotatedFile, err := os.CreateTemp(tempFolder, "diagram-*-.png") + checkErr(err) + defer os.Remove(rotatedFile.Name()) + dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx())) + err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0}) + checkErr(err) + newImage, _ := os.Create(rotatedFile.Name()) + defer newImage.Close() + err = png.Encode(newImage, dstImage) + checkErr(err) + diagramFilenamePNG = rotatedFile.Name() + } + } else { + r.pdf.AddPage() + } + } else { + r.pdf.Ln(10) + } + */ + // embed in PDF + r.pdf.Ln(10) + var options gofpdf.ImageOptions + options.ImageType = "" + r.pdf.RegisterImage(diagramFilenamePNG, "") + if widerThanHigh { + pinnedHeight = 0 + } else { + pinnedWidth = 0 + } + r.pdf.ImageOptions(diagramFilenamePNG, 10, r.pdf.GetY(), pinnedWidth, pinnedHeight, true, options, 0, "") + r.isLandscapePage = false +} + +func (r 
*pdfReporter) writeReportToFile(reportFilename string) error { + err := r.pdf.OutputFileAndClose(reportFilename) + if err != nil { + return fmt.Errorf("error writing PDF report file: %w", err) + } + return nil +} + +func (r *pdfReporter) addHeadline(headline string, small bool) { + r.pdf.AddPage() + gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300) + fontSize := fontSizeHeadline + if small { + fontSize = fontSizeHeadlineSmall + } + r.pdf.SetFont("Helvetica", "B", float64(fontSize)) + r.pdf.Text(11, 40, headline) + r.pdf.SetFont("Helvetica", "", fontSizeBody) + r.pdf.SetX(17) + r.pdf.SetY(46) +} + +func (r *pdfReporter) pageBreak() { + r.pdf.SetDrawColor(0, 0, 0) + r.pdf.SetDashPattern([]float64{}, 0) + r.pdf.AddPage() + gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300) + r.pdf.SetX(17) + r.pdf.SetY(20) +} + +func (r *pdfReporter) pageBreakInLists() { + r.pageBreak() + r.pdf.SetLineWidth(0.25) + r.pdf.SetDrawColor(160, 160, 160) + r.pdf.SetDashPattern([]float64{0.5, 0.5}, 0) +} + +func (r *pdfReporter) pdfColorDataAssets() { + r.pdf.SetTextColor(18, 36, 111) +} +func rgbHexColorDataAssets() string { + return "#12246F" +} + +func (r *pdfReporter) pdfColorTechnicalAssets() { + r.pdf.SetTextColor(18, 36, 111) +} +func rgbHexColorTechnicalAssets() string { + return "#12246F" +} + +func (r *pdfReporter) pdfColorTrustBoundaries() { + r.pdf.SetTextColor(18, 36, 111) +} +func rgbHexColorTrustBoundaries() string { + return "#12246F" +} + +func (r *pdfReporter) pdfColorSharedRuntime() { + r.pdf.SetTextColor(18, 36, 111) +} +func rgbHexColorSharedRuntime() string { + return "#12246F" +} + +func (r *pdfReporter) pdfColorRiskFindings() { + r.pdf.SetTextColor(160, 40, 30) +} + +func rgbHexColorRiskFindings() string { + return "#A0281E" +} + +func (r *pdfReporter) pdfColorDisclaimer() { + r.pdf.SetTextColor(140, 140, 140) +} +func rgbHexColorDisclaimer() string { + return "#8C8C8C" +} + +func (r *pdfReporter) pdfColorOutOfScope() { + 
r.pdf.SetTextColor(127, 127, 127) +} + +func rgbHexColorOutOfScope() string { + return "#7F7F7F" +} + +func (r *pdfReporter) pdfColorGray() { + r.pdf.SetTextColor(80, 80, 80) +} +func rgbHexColorGray() string { + return "#505050" +} + +func (r *pdfReporter) pdfColorLightGray() { + r.pdf.SetTextColor(100, 100, 100) +} +func rgbHexColorLightGray() string { + return "#646464" +} + +func (r *pdfReporter) pdfColorBlack() { + r.pdf.SetTextColor(0, 0, 0) +} +func rgbHexColorBlack() string { + return "#000000" +} + +func (r *pdfReporter) pdfColorRed() { + r.pdf.SetTextColor(255, 0, 0) +} +func rgbHexColorRed() string { + return "#FF0000" +} diff --git a/risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go b/pkg/security/risks/builtin/accidental-secret-leak-rule.go similarity index 54% rename from risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go rename to pkg/security/risks/builtin/accidental-secret-leak-rule.go index 3f4f9b4c..fe0afee3 100644 --- a/risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go +++ b/pkg/security/risks/builtin/accidental-secret-leak-rule.go @@ -1,11 +1,17 @@ -package accidental_secret_leak +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type AccidentalSecretLeakRule struct{} + +func NewAccidentalSecretLeakRule() *AccidentalSecretLeakRule { + return &AccidentalSecretLeakRule{} +} + +func (*AccidentalSecretLeakRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "accidental-secret-leak", Title: "Accidental Secret Leak", Description: "Sourcecode repositories (including their histories) as well as artifact registries can accidentally contain secrets like " + @@ -20,31 +26,31 @@ func Category() model.RiskCategory { "See for example tools like \"git-secrets\" or \"Talisman\" to have check-in preventive measures for secrets. 
" + "Consider also to regularly scan your repositories for secrets accidentally checked-in using scanning tools like \"gitleaks\" or \"gitrob\".", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.InformationDisclosure, + Function: types.Operations, + STRIDE: types.InformationDisclosure, DetectionLogic: "In-scope sourcecode repositories and artifact registries.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Usually no false positives.", ModelFailurePossibleReason: false, CWE: 200, } } -func SupportedTags() []string { +func (*AccidentalSecretLeakRule) SupportedTags() []string { return []string{"git", "nexus"} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - techAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *AccidentalSecretLeakRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range parsedModel.SortedTechnicalAssetIDs() { + techAsset := parsedModel.TechnicalAssets[id] if !techAsset.OutOfScope && - (techAsset.Technology == model.SourcecodeRepository || techAsset.Technology == model.ArtifactRegistry) { - var risk model.Risk + (techAsset.Technology == types.SourcecodeRepository || techAsset.Technology == types.ArtifactRegistry) { + var risk types.Risk if techAsset.IsTaggedWithAny("git") { - risk = createRisk(techAsset, "Git", "Git Leak Prevention") + risk = r.createRisk(parsedModel, techAsset, "Git", "Git Leak Prevention") } else { - risk = createRisk(techAsset, "", "") + risk = r.createRisk(parsedModel, techAsset, "", "") } risks = append(risks, risk) } @@ -52,7 +58,7 @@ func 
GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset, prefix, details string) model.Risk { +func (r *AccidentalSecretLeakRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, prefix, details string) types.Risk { if len(prefix) > 0 { prefix = " (" + prefix + ")" } @@ -60,28 +66,28 @@ func createRisk(technicalAsset model.TechnicalAsset, prefix, details string) mod if len(details) > 0 { title += ": " + details + "" } - impact := model.LowImpact - if technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical || - technicalAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact + impact := types.LowImpact + if technicalAsset.HighestConfidentiality(parsedModel) >= types.Confidential || + technicalAsset.HighestIntegrity(parsedModel) >= types.Critical || + technicalAsset.HighestAvailability(parsedModel) >= types.Critical { + impact = types.MediumImpact } - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.HighImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.HighImpact } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + 
DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/code-backdooring/code-backdooring-rule.go b/pkg/security/risks/builtin/code-backdooring-rule.go similarity index 68% rename from risks/built-in/code-backdooring/code-backdooring-rule.go rename to pkg/security/risks/builtin/code-backdooring-rule.go index 1f6e518e..1e19966f 100644 --- a/risks/built-in/code-backdooring/code-backdooring-rule.go +++ b/pkg/security/risks/builtin/code-backdooring-rule.go @@ -1,11 +1,17 @@ -package code_backdooring +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type CodeBackdooringRule struct{} + +func NewCodeBackdooringRule() *CodeBackdooringRule { + return &CodeBackdooringRule{} +} + +func (*CodeBackdooringRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "code-backdooring", Title: "Code Backdooring", Description: "For each build-pipeline component Code Backdooring risks might arise where attackers compromise the build-pipeline " + @@ -20,8 +26,8 @@ func Category() model.RiskCategory { "components on the public internet and also not exposing it in front of unmanaged (out-of-scope) developer clients." 
+ "Also consider the use of code signing to prevent code modifications.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.Tampering, + Function: types.Operations, + STRIDE: types.Tampering, DetectionLogic: "In-scope development relevant technical assets which are either accessed by out-of-scope unmanaged " + "developer clients and/or are directly accessed by any kind of internet-located (non-VPN) component or are themselves directly located " + "on the internet.", @@ -36,27 +42,27 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*CodeBackdooringRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *CodeBackdooringRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range parsedModel.SortedTechnicalAssetIDs() { + technicalAsset := parsedModel.TechnicalAssets[id] if !technicalAsset.OutOfScope && technicalAsset.Technology.IsDevelopmentRelevant() { if technicalAsset.Internet { - risks = append(risks, createRisk(technicalAsset, true)) + risks = append(risks, r.createRisk(parsedModel, technicalAsset, true)) continue } // TODO: ensure that even internet or unmanaged clients coming over a reverse-proxy or load-balancer like component are treated as if it was directly accessed/exposed on the internet or towards unmanaged dev clients //riskByLinkAdded := false - for _, callerLink := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { - caller := model.ParsedModelRoot.TechnicalAssets[callerLink.SourceId] + for _, callerLink := range parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { + caller := 
parsedModel.TechnicalAssets[callerLink.SourceId] if (!callerLink.VPN && caller.Internet) || caller.OutOfScope { - risks = append(risks, createRisk(technicalAsset, true)) + risks = append(risks, r.createRisk(parsedModel, technicalAsset, true)) //riskByLinkAdded = true break } @@ -66,17 +72,17 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Risk { +func (r *CodeBackdooringRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, elevatedRisk bool) types.Risk { title := "Code Backdooring risk at " + technicalAsset.Title + "" - impact := model.LowImpact - if technicalAsset.Technology != model.CodeInspectionPlatform { + impact := types.LowImpact + if technicalAsset.Technology != types.CodeInspectionPlatform { if elevatedRisk { - impact = model.MediumImpact + impact = types.MediumImpact } - if technicalAsset.HighestConfidentiality() >= model.Confidential || technicalAsset.HighestIntegrity() >= model.Critical { - impact = model.MediumImpact + if technicalAsset.HighestConfidentiality(input) >= types.Confidential || technicalAsset.HighestIntegrity(input) >= types.Critical { + impact = types.MediumImpact if elevatedRisk { - impact = model.HighImpact + impact = types.HighImpact } } } @@ -84,10 +90,10 @@ func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Ri uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{}) uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true for _, codeDeploymentTargetCommLink := range technicalAsset.CommunicationLinks { - if codeDeploymentTargetCommLink.Usage == model.DevOps { + if codeDeploymentTargetCommLink.Usage == types.DevOps { for _, dataAssetID := range codeDeploymentTargetCommLink.DataAssetsSent { // it appears to be code when elevated integrity rating of sent data asset - if model.ParsedModelRoot.DataAssets[dataAssetID].Integrity >= model.Important { + if input.DataAssets[dataAssetID].Integrity 
>= types.Important { // here we've got a deployment target which has its data assets at risk via deployment of backdoored code uniqueDataBreachTechnicalAssetIDs[codeDeploymentTargetCommLink.TargetId] = true break @@ -96,20 +102,20 @@ func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Ri } } dataBreachTechnicalAssetIDs := make([]string, 0) - for key, _ := range uniqueDataBreachTechnicalAssetIDs { + for key := range uniqueDataBreachTechnicalAssetIDs { dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key) } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go b/pkg/security/risks/builtin/container-baseimage-backdooring-rule.go similarity index 59% rename from risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go rename to pkg/security/risks/builtin/container-baseimage-backdooring-rule.go index e8d92d37..f77d412d 100644 --- a/risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go +++ b/pkg/security/risks/builtin/container-baseimage-backdooring-rule.go @@ -1,11 +1,17 @@ -package container_baseimage_backdooring +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func 
Category() model.RiskCategory { - return model.RiskCategory{ +type ContainerBaseImageBackdooringRule struct{} + +func NewContainerBaseImageBackdooringRule() *ContainerBaseImageBackdooringRule { + return &ContainerBaseImageBackdooringRule{} +} + +func (*ContainerBaseImageBackdooringRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "container-baseimage-backdooring", Title: "Container Base Image Backdooring", Description: "When a technical asset is built using container technologies, Base Image Backdooring risks might arise where " + @@ -20,8 +26,8 @@ func Category() model.RiskCategory { "Also consider using Google's Distroless base images or otherwise very small base images. " + "Regularly execute container image scans with tools checking the layers for vulnerable components.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS/CSVS applied?", - Function: model.Operations, - STRIDE: model.Tampering, + Function: types.Operations, + STRIDE: types.Tampering, DetectionLogic: "In-scope technical assets running as containers.", RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets.", FalsePositives: "Fully trusted (i.e. 
reviewed and cryptographically signed or similar) base images of containers can be considered " + @@ -31,39 +37,39 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*ContainerBaseImageBackdooringRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && technicalAsset.Machine == model.Container { - risks = append(risks, createRisk(technicalAsset)) +func (r *ContainerBaseImageBackdooringRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range parsedModel.SortedTechnicalAssetIDs() { + technicalAsset := parsedModel.TechnicalAssets[id] + if !technicalAsset.OutOfScope && technicalAsset.Machine == types.Container { + risks = append(risks, r.createRisk(parsedModel, technicalAsset)) } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *ContainerBaseImageBackdooringRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Container Base Image Backdooring risk at " + technicalAsset.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.HighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - 
ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/container-platform-escape/container-platform-escape-rule.go b/pkg/security/risks/builtin/container-platform-escape-rule.go similarity index 65% rename from risks/built-in/container-platform-escape/container-platform-escape-rule.go rename to pkg/security/risks/builtin/container-platform-escape-rule.go index 520b3d1d..3d32c99f 100644 --- a/risks/built-in/container-platform-escape/container-platform-escape-rule.go +++ b/pkg/security/risks/builtin/container-platform-escape-rule.go @@ -1,11 +1,17 @@ -package container_platform_escape +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type ContainerPlatformEscapeRule struct{} + +func NewContainerPlatformEscapeRule() *ContainerPlatformEscapeRule { + return &ContainerPlatformEscapeRule{} +} + +func (*ContainerPlatformEscapeRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "container-platform-escape", Title: "Container Platform Escape", Description: "Container platforms are especially interesting targets for attackers as they host big parts of a containerized runtime infrastructure. " + @@ -25,10 +31,10 @@ func Category() model.RiskCategory { "Use only trusted base images, verify digital signatures and apply image creation best practices. 
Also consider using Google's Distroless base images or otherwise very small base images. " + "Apply namespace isolation and nod affinity to separate pods from each other in terms of access and nodes the same style as you separate data.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS or CSVS chapter applied?", - Function: model.Operations, - STRIDE: model.ElevationOfPrivilege, + Function: types.Operations, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: "In-scope container platforms.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Container platforms not running parts of the target architecture can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -36,47 +42,47 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*ContainerPlatformEscapeRule) SupportedTags() []string { return []string{"docker", "kubernetes", "openshift"} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && technicalAsset.Technology == model.ContainerPlatform { - risks = append(risks, createRisk(technicalAsset)) +func (r *ContainerPlatformEscapeRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range parsedModel.SortedTechnicalAssetIDs() { + technicalAsset := parsedModel.TechnicalAssets[id] + if !technicalAsset.OutOfScope && technicalAsset.Technology == types.ContainerPlatform { + risks = append(risks, r.createRisk(parsedModel, technicalAsset)) } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) 
model.Risk { +func (r *ContainerPlatformEscapeRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Container Platform Escape risk at " + technicalAsset.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.HighImpact } // data breach at all container assets dataBreachTechnicalAssetIDs := make([]string, 0) - for id, techAsset := range model.ParsedModelRoot.TechnicalAssets { - if techAsset.Machine == model.Container { + for id, techAsset := range parsedModel.TechnicalAssets { + if techAsset.Machine == types.Container { dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, id) } } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go 
b/pkg/security/risks/builtin/cross-site-request-forgery-rule.go similarity index 61% rename from risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go rename to pkg/security/risks/builtin/cross-site-request-forgery-rule.go index 04ed7882..1f19d324 100644 --- a/risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go +++ b/pkg/security/risks/builtin/cross-site-request-forgery-rule.go @@ -1,11 +1,17 @@ -package cross_site_request_forgery +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type CrossSiteRequestForgeryRule struct{} + +func NewCrossSiteRequestForgeryRule() *CrossSiteRequestForgeryRule { + return &CrossSiteRequestForgeryRule{} +} + +func (*CrossSiteRequestForgeryRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "cross-site-request-forgery", Title: "Cross-Site Request Forgery (CSRF)", Description: "When a web application is accessed via web protocols Cross-Site Request Forgery (CSRF) risks might arise.", @@ -19,8 +25,8 @@ func Category() model.RiskCategory { "the same-site flag. 
" + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Spoofing, + Function: types.Development, + STRIDE: types.Spoofing, DetectionLogic: "In-scope web applications accessed via typical web access protocols.", RiskAssessment: "The risk rating depends on the integrity rating of the data sent across the communication link.", FalsePositives: "Web applications passing the authentication sate via custom headers instead of cookies can " + @@ -32,49 +38,49 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*CrossSiteRequestForgeryRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *CrossSiteRequestForgeryRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range parsedModel.SortedTechnicalAssetIDs() { + technicalAsset := parsedModel.TechnicalAssets[id] if technicalAsset.OutOfScope || !technicalAsset.Technology.IsWebApplication() { continue } - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + incomingFlows := parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] for _, incomingFlow := range incomingFlows { if incomingFlow.Protocol.IsPotentialWebAccessProtocol() { - likelihood := model.VeryLikely - if incomingFlow.Usage == model.DevOps { - likelihood = model.Likely + likelihood := types.VeryLikely + if incomingFlow.Usage == types.DevOps { + likelihood = types.Likely } - risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood)) + risks = 
append(risks, r.createRisk(parsedModel, technicalAsset, incomingFlow, likelihood)) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk { - sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] +func (r *CrossSiteRequestForgeryRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk { + sourceAsset := parsedModel.TechnicalAssets[incomingFlow.SourceId] title := "Cross-Site Request Forgery (CSRF) risk at " + technicalAsset.Title + " via " + incomingFlow.Title + " from " + sourceAsset.Title + "" - impact := model.LowImpact - if incomingFlow.HighestIntegrity() == model.MissionCritical { - impact = model.MediumImpact + impact := types.LowImpact + if incomingFlow.HighestIntegrity(parsedModel) == types.MissionCritical { + impact = types.MediumImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: incomingFlow.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + incomingFlow.Id return risk } diff --git a/risks/built-in/cross-site-scripting/cross-site-scripting-rule.go b/pkg/security/risks/builtin/cross-site-scripting-rule.go similarity index 60% rename from risks/built-in/cross-site-scripting/cross-site-scripting-rule.go rename to 
pkg/security/risks/builtin/cross-site-scripting-rule.go index a6da6781..829de3af 100644 --- a/risks/built-in/cross-site-scripting/cross-site-scripting-rule.go +++ b/pkg/security/risks/builtin/cross-site-scripting-rule.go @@ -1,11 +1,17 @@ -package cross_site_scripting +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type CrossSiteScriptingRule struct{} + +func NewCrossSiteScriptingRule() *CrossSiteScriptingRule { + return &CrossSiteScriptingRule{} +} + +func (*CrossSiteScriptingRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "cross-site-scripting", Title: "Cross-Site Scripting (XSS)", Description: "For each web application Cross-Site Scripting (XSS) risks might arise. In terms " + @@ -18,10 +24,10 @@ func Category() model.RiskCategory { "to avoid DOM-based XSS. " + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Tampering, + Function: types.Development, + STRIDE: types.Tampering, DetectionLogic: "In-scope web applications.", - RiskAssessment: "The risk rating depends on the sensitivity of the data processed or stored in the web application.", + RiskAssessment: "The risk rating depends on the sensitivity of the data processed in the web application.", FalsePositives: "When the technical asset " + "is not accessed via a browser-like component (i.e not by a human user initiating the request that " + "gets passed through all components until it reaches the web application) this can be considered a false positive.", @@ -30,38 +36,38 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*CrossSiteScriptingRule) SupportedTags() 
[]string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *CrossSiteScriptingRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if technicalAsset.OutOfScope || !technicalAsset.Technology.IsWebApplication() { // TODO: also mobile clients or rich-clients as long as they use web-view... continue } - risks = append(risks, createRisk(technicalAsset)) + risks = append(risks, r.createRisk(input, technicalAsset)) } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *CrossSiteScriptingRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Cross-Site Scripting (XSS) risk at " + technicalAsset.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical { + impact = types.HighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Likely, impact), - ExploitationLikelihood: model.Likely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Likely, impact), + ExploitationLikelihood: types.Likely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Possible, + DataBreachProbability: types.Possible, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + 
"@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go b/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go new file mode 100644 index 00000000..09fc70b3 --- /dev/null +++ b/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go @@ -0,0 +1,104 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type DosRiskyAccessAcrossTrustBoundaryRule struct{} + +func NewDosRiskyAccessAcrossTrustBoundaryRule() *DosRiskyAccessAcrossTrustBoundaryRule { + return &DosRiskyAccessAcrossTrustBoundaryRule{} +} + +func (*DosRiskyAccessAcrossTrustBoundaryRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "dos-risky-access-across-trust-boundary", + Title: "DoS-risky Access Across Trust-Boundary", + Description: "Assets accessed across trust boundaries with critical or mission-critical availability rating " + + "are more prone to Denial-of-Service (DoS) risks.", + Impact: "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.", + ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html", + Action: "Anti-DoS Measures", + Mitigation: "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. " + + "Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. 
" + + "Generally applying redundancy on the targeted technical asset reduces the risk of DoS.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Operations, + STRIDE: types.DenialOfService, + DetectionLogic: "In-scope technical assets (excluding " + types.LoadBalancer.String() + ") with " + + "availability rating of " + types.Critical.String() + " or higher which have incoming data-flows across a " + + "network trust-boundary (excluding " + types.DevOps.String() + " usage).", + RiskAssessment: "Matching technical assets with availability rating " + + "of " + types.Critical.String() + " or higher are " + + "at " + types.LowSeverity.String() + " risk. When the availability rating is " + + types.MissionCritical.String() + " and neither a VPN nor IP filter for the incoming data-flow nor redundancy " + + "for the asset is applied, the risk-rating is considered " + types.MediumSeverity.String() + ".", // TODO reduce also, when data-flow authenticated and encrypted? 
+ FalsePositives: "When the accessed target operations are not time- or resource-consuming.", + ModelFailurePossibleReason: false, + CWE: 400, + } +} + +func (*DosRiskyAccessAcrossTrustBoundaryRule) SupportedTags() []string { + return []string{} +} + +func (r *DosRiskyAccessAcrossTrustBoundaryRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope && technicalAsset.Technology != types.LoadBalancer && + technicalAsset.Availability >= types.Critical { + for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { + sourceAsset := input.TechnicalAssets[incomingAccess.SourceId] + if sourceAsset.Technology.IsTrafficForwarding() { + // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human + callersCommLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[sourceAsset.Id] + for _, callersCommLink := range callersCommLinks { + risks = r.checkRisk(input, technicalAsset, callersCommLink, sourceAsset.Title, risks) + } + } else { + risks = r.checkRisk(input, technicalAsset, incomingAccess, "", risks) + } + } + } + } + return risks +} + +func (r *DosRiskyAccessAcrossTrustBoundaryRule) checkRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink, hopBetween string, risks []types.Risk) []types.Risk { + if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) && + !incomingAccess.Protocol.IsProcessLocal() && incomingAccess.Usage != types.DevOps { + highRisk := technicalAsset.Availability == types.MissionCritical && + !incomingAccess.VPN && !incomingAccess.IpFiltered && !technicalAsset.Redundant + risks = append(risks, r.createRisk(technicalAsset, incomingAccess, hopBetween, + input.TechnicalAssets[incomingAccess.SourceId], highRisk)) + } + return risks +} + +func (r 
*DosRiskyAccessAcrossTrustBoundaryRule) createRisk(techAsset types.TechnicalAsset, dataFlow types.CommunicationLink, hopBetween string, + clientOutsideTrustBoundary types.TechnicalAsset, moreRisky bool) types.Risk { + impact := types.LowImpact + if moreRisky { + impact = types.MediumImpact + } + if len(hopBetween) > 0 { + hopBetween = " forwarded via " + hopBetween + "" + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: "Denial-of-Service risky access of " + techAsset.Title + " by " + clientOutsideTrustBoundary.Title + + " via " + dataFlow.Title + "" + hopBetween, + MostRelevantTechnicalAssetId: techAsset.Id, + MostRelevantCommunicationLinkId: dataFlow.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{}, + } + risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataFlow.Id + return risk +} diff --git a/risks/built-in/incomplete-model/incomplete-model-rule.go b/pkg/security/risks/builtin/incomplete-model-rule.go similarity index 50% rename from risks/built-in/incomplete-model/incomplete-model-rule.go rename to pkg/security/risks/builtin/incomplete-model-rule.go index 55399410..b954d646 100644 --- a/risks/built-in/incomplete-model/incomplete-model-rule.go +++ b/pkg/security/risks/builtin/incomplete-model-rule.go @@ -1,11 +1,17 @@ -package incomplete_model +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type IncompleteModelRule struct{} + +func NewIncompleteModelRule() *IncompleteModelRule { + return &IncompleteModelRule{} +} + +func (*IncompleteModelRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "incomplete-model", Title: "Incomplete Model", Description: "When the threat 
model contains unknown technologies or transfers data over unknown protocols, this is " + @@ -16,31 +22,31 @@ func Category() model.RiskCategory { Action: "Threat Modeling Completeness", Mitigation: "Try to find out what technology or protocol is used instead of specifying that it is unknown.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.InformationDisclosure, + Function: types.Architecture, + STRIDE: types.InformationDisclosure, DetectionLogic: "All technical assets and communication links with technology type or protocol type specified as unknown.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*IncompleteModelRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *IncompleteModelRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if !technicalAsset.OutOfScope { - if technicalAsset.Technology == model.UnknownTechnology { - risks = append(risks, createRiskTechAsset(technicalAsset)) + if technicalAsset.Technology == types.UnknownTechnology { + risks = append(risks, r.createRiskTechAsset(technicalAsset)) } for _, commLink := range technicalAsset.CommunicationLinks { - if commLink.Protocol == model.UnknownProtocol { - risks = append(risks, createRiskCommLink(technicalAsset, commLink)) + if commLink.Protocol == types.UnknownProtocol { + risks = append(risks, r.createRiskCommLink(technicalAsset, commLink)) } } } @@ -48,35 
+54,35 @@ func GenerateRisks() []model.Risk { return risks } -func createRiskTechAsset(technicalAsset model.TechnicalAsset) model.Risk { +func (r *IncompleteModelRule) createRiskTechAsset(technicalAsset types.TechnicalAsset) types.Risk { title := "Unknown Technology specified at technical asset " + technicalAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } -func createRiskCommLink(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink) model.Risk { +func (r *IncompleteModelRule) createRiskCommLink(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink) types.Risk { title := "Unknown Protocol specified for communication link " + commLink.Title + " at technical asset " + technicalAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: commLink.Id, - DataBreachProbability: 
model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + commLink.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + commLink.Id + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/ldap-injection/ldap-injection-rule.go b/pkg/security/risks/builtin/ldap-injection-rule.go similarity index 54% rename from risks/built-in/ldap-injection/ldap-injection-rule.go rename to pkg/security/risks/builtin/ldap-injection-rule.go index e76d2706..5f04a719 100644 --- a/risks/built-in/ldap-injection/ldap-injection-rule.go +++ b/pkg/security/risks/builtin/ldap-injection-rule.go @@ -1,15 +1,21 @@ -package ldap_injection +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type LdapInjectionRule struct{} + +func NewLdapInjectionRule() *LdapInjectionRule { + return &LdapInjectionRule{} +} + +func (*LdapInjectionRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "ldap-injection", Title: "LDAP-Injection", Description: "When an LDAP server is accessed LDAP-Injection risks might arise. " + - "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.", + "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed.", Impact: "If this risk remains unmitigated, attackers might be able to modify LDAP queries and access more data from the LDAP server than allowed.", ASVS: "V5 - Validation, Sanitization and Encoding Verification Requirements", CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html", @@ -18,10 +24,10 @@ func Category() model.RiskCategory { "the LDAP sever in order to stay safe from LDAP-Injection vulnerabilities. 
" + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Tampering, + Function: types.Development, + STRIDE: types.Tampering, DetectionLogic: "In-scope clients accessing LDAP servers via typical LDAP access protocols.", - RiskAssessment: "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed.", FalsePositives: "LDAP server queries by search values not consisting of parts controllable by the caller can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -29,49 +35,49 @@ func Category() model.RiskCategory { } } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] +func (*LdapInjectionRule) SupportedTags() []string { + return []string{} +} + +func (r *LdapInjectionRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { + incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] for _, incomingFlow := range incomingFlows { - if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope { + if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope { continue } - if incomingFlow.Protocol == model.LDAP || incomingFlow.Protocol == model.LDAPS { - likelihood := model.Likely - if incomingFlow.Usage == model.DevOps { - likelihood = model.Unlikely + if incomingFlow.Protocol 
== types.LDAP || incomingFlow.Protocol == types.LDAPS { + likelihood := types.Likely + if incomingFlow.Usage == types.DevOps { + likelihood = types.Unlikely } - risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood)) + risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood)) } } } return risks } -func SupportedTags() []string { - return []string{} -} - -func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk { - caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] +func (r *LdapInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk { + caller := input.TechnicalAssets[incomingFlow.SourceId] title := "LDAP-Injection risk at " + caller.Title + " against LDAP server " + technicalAsset.Title + "" + " via " + incomingFlow.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical { + impact = types.HighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: caller.Id, MostRelevantCommunicationLinkId: incomingFlow.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + 
caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id + risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id return risk } diff --git a/pkg/security/risks/builtin/missing-authentication-rule.go b/pkg/security/risks/builtin/missing-authentication-rule.go new file mode 100644 index 00000000..faf334c5 --- /dev/null +++ b/pkg/security/risks/builtin/missing-authentication-rule.go @@ -0,0 +1,104 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingAuthenticationRule struct{} + +func NewMissingAuthenticationRule() *MissingAuthenticationRule { + return &MissingAuthenticationRule{} +} + +func (*MissingAuthenticationRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-authentication", + Title: "Missing Authentication", + Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes sensitive data. ", + Impact: "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.", + ASVS: "V2 - Authentication Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", + Action: "Authentication of Incoming Requests", + Mitigation: "Apply an authentication method to the technical asset. 
To protect highly sensitive data consider " + + "the use of two-factor authentication for human users.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope technical assets (except " + types.LoadBalancer.String() + ", " + types.ReverseProxy.String() + ", " + types.ServiceRegistry.String() + ", " + types.WAF.String() + ", " + types.IDS.String() + ", and " + types.IPS.String() + " and in-process calls) should authenticate incoming requests when the asset processes " + + "sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).", + RiskAssessment: "The risk rating (medium or high) " + + "depends on the sensitivity of the data sent across the communication link. Monitoring callers are exempted from this risk.", + FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + + "can be considered as false positives after individual review.", + ModelFailurePossibleReason: false, + CWE: 306, + } +} + +func (*MissingAuthenticationRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingAuthenticationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.OutOfScope || technicalAsset.Technology == types.LoadBalancer || + technicalAsset.Technology == types.ReverseProxy || technicalAsset.Technology == types.ServiceRegistry || technicalAsset.Technology == types.WAF || technicalAsset.Technology == types.IDS || technicalAsset.Technology == types.IPS { + continue + } + if technicalAsset.HighestConfidentiality(input) >= types.Confidential || + technicalAsset.HighestIntegrity(input) >= types.Critical || + technicalAsset.HighestAvailability(input) >= 
types.Critical || + technicalAsset.MultiTenant { + // check each incoming data flow + commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + for _, commLink := range commLinks { + caller := input.TechnicalAssets[commLink.SourceId] + if caller.Technology.IsUnprotectedCommunicationsTolerated() || caller.Type == types.Datastore { + continue + } + highRisk := commLink.HighestConfidentiality(input) == types.StrictlyConfidential || + commLink.HighestIntegrity(input) == types.MissionCritical + lowRisk := commLink.HighestConfidentiality(input) <= types.Internal && + commLink.HighestIntegrity(input) == types.Operational + impact := types.MediumImpact + if highRisk { + impact = types.HighImpact + } else if lowRisk { + impact = types.LowImpact + } + if commLink.Authentication == types.NoneAuthentication && !commLink.Protocol.IsProcessLocal() { + risks = append(risks, r.createRisk(input, technicalAsset, commLink, commLink, "", impact, types.Likely, false, r.Category())) + } + } + } + } + return risks +} + +func (r *MissingAuthenticationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess, incomingAccessOrigin types.CommunicationLink, hopBetween string, + impact types.RiskExploitationImpact, likelihood types.RiskExploitationLikelihood, twoFactor bool, category types.RiskCategory) types.Risk { + factorString := "" + if twoFactor { + factorString = "Two-Factor " + } + if len(hopBetween) > 0 { + hopBetween = "forwarded via " + hopBetween + " " + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), + ExploitationLikelihood: likelihood, + ExploitationImpact: impact, + Title: "Missing " + factorString + "Authentication covering communication link " + incomingAccess.Title + " " + + "from " + input.TechnicalAssets[incomingAccessOrigin.SourceId].Title + " " + hopBetween + + "to " + technicalAsset.Title + "", + MostRelevantTechnicalAssetId: 
technicalAsset.Id, + MostRelevantCommunicationLinkId: incomingAccess.Id, + DataBreachProbability: types.Possible, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + incomingAccess.Id + "@" + input.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id + return risk +} diff --git a/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go b/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go new file mode 100644 index 00000000..9c159187 --- /dev/null +++ b/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go @@ -0,0 +1,91 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingAuthenticationSecondFactorRule struct { + missingAuthenticationRule *MissingAuthenticationRule +} + +func NewMissingAuthenticationSecondFactorRule(missingAuthenticationRule *MissingAuthenticationRule) *MissingAuthenticationSecondFactorRule { + return &MissingAuthenticationSecondFactorRule{missingAuthenticationRule: missingAuthenticationRule} +} + +func (*MissingAuthenticationSecondFactorRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-authentication-second-factor", + Title: "Missing Two-Factor Authentication (2FA)", + Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests with " + + "two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.", + Impact: "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.", + ASVS: "V2 - Authentication Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html", + Action: "Authentication with Second Factor (2FA)", + Mitigation: "Apply an 
authentication method to the technical asset protecting highly sensitive data via " + + "two-factor authentication for human users.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.BusinessSide, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope technical assets (except " + types.LoadBalancer.String() + ", " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", " + types.IDS.String() + ", and " + types.IPS.String() + ") should authenticate incoming requests via two-factor authentication (2FA) " + + "when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.", + RiskAssessment: types.MediumSeverity.String(), + FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + + "can be considered as false positives after individual review.", + ModelFailurePossibleReason: false, + CWE: 308, + } +} + +func (*MissingAuthenticationSecondFactorRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingAuthenticationSecondFactorRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.OutOfScope || + technicalAsset.Technology.IsTrafficForwarding() || + technicalAsset.Technology.IsUnprotectedCommunicationsTolerated() { + continue + } + if technicalAsset.HighestConfidentiality(input) >= types.Confidential || + technicalAsset.HighestIntegrity(input) >= types.Critical || + technicalAsset.HighestAvailability(input) >= types.Critical || + technicalAsset.MultiTenant { + // check each incoming data flow + commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + for _, commLink := range commLinks { + caller := 
input.TechnicalAssets[commLink.SourceId] + if caller.Technology.IsUnprotectedCommunicationsTolerated() || caller.Type == types.Datastore { + continue + } + if caller.UsedAsClientByHuman { + moreRisky := commLink.HighestConfidentiality(input) >= types.Confidential || + commLink.HighestIntegrity(input) >= types.Critical + if moreRisky && commLink.Authentication != types.TwoFactor { + risks = append(risks, r.missingAuthenticationRule.createRisk(input, technicalAsset, commLink, commLink, "", types.MediumImpact, types.Unlikely, true, r.Category())) + } + } else if caller.Technology.IsTrafficForwarding() { + // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human + callersCommLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[caller.Id] + for _, callersCommLink := range callersCommLinks { + callersCaller := input.TechnicalAssets[callersCommLink.SourceId] + if callersCaller.Technology.IsUnprotectedCommunicationsTolerated() || callersCaller.Type == types.Datastore { + continue + } + if callersCaller.UsedAsClientByHuman { + moreRisky := callersCommLink.HighestConfidentiality(input) >= types.Confidential || + callersCommLink.HighestIntegrity(input) >= types.Critical + if moreRisky && callersCommLink.Authentication != types.TwoFactor { + risks = append(risks, r.missingAuthenticationRule.createRisk(input, technicalAsset, commLink, callersCommLink, caller.Title, types.MediumImpact, types.Unlikely, true, r.Category())) + } + } + } + } + } + } + } + return risks +} diff --git a/risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go b/pkg/security/risks/builtin/missing-build-infrastructure-rule.go similarity index 57% rename from risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go rename to pkg/security/risks/builtin/missing-build-infrastructure-rule.go index 1eb1662a..22dea469 100644 --- a/risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go +++ 
b/pkg/security/risks/builtin/missing-build-infrastructure-rule.go @@ -1,11 +1,17 @@ -package missing_build_infrastructure +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingBuildInfrastructureRule struct{} + +func NewMissingBuildInfrastructureRule() *MissingBuildInfrastructureRule { + return &MissingBuildInfrastructureRule{} +} + +func (*MissingBuildInfrastructureRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "missing-build-infrastructure", Title: "Missing Build Infrastructure", Description: "The modeled architecture does not contain a build infrastructure (devops-client, sourcecode-repo, build-pipeline, etc.), " + @@ -19,8 +25,8 @@ func Category() model.RiskCategory { Action: "Build Pipeline Hardening", Mitigation: "Include the build infrastructure in the model.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Tampering, + Function: types.Architecture, + STRIDE: types.Tampering, DetectionLogic: "Models with in-scope custom-developed parts missing in-scope development (code creation) and build infrastructure " + "components (devops-client, sourcecode-repo, build-pipeline, etc.).", RiskAssessment: "The risk rating depends on the highest sensitivity of the in-scope assets running custom-developed parts.", @@ -31,66 +37,66 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MissingBuildInfrastructureRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) +func (r *MissingBuildInfrastructureRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) hasCustomDevelopedParts, hasBuildPipeline, hasSourcecodeRepo, hasDevOpsClient := false, false, false, false - impact := 
model.LowImpact - var mostRelevantAsset model.TechnicalAsset - for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] + impact := types.LowImpact + var mostRelevantAsset types.TechnicalAsset + for _, id := range input.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset + technicalAsset := input.TechnicalAssets[id] if technicalAsset.CustomDevelopedParts && !technicalAsset.OutOfScope { hasCustomDevelopedParts = true - if impact == model.LowImpact { + if impact == types.LowImpact { mostRelevantAsset = technicalAsset - if technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical || - technicalAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact + if technicalAsset.HighestConfidentiality(input) >= types.Confidential || + technicalAsset.HighestIntegrity(input) >= types.Critical || + technicalAsset.HighestAvailability(input) >= types.Critical { + impact = types.MediumImpact } } - if technicalAsset.Confidentiality >= model.Confidential || - technicalAsset.Integrity >= model.Critical || - technicalAsset.Availability >= model.Critical { - impact = model.MediumImpact + if technicalAsset.Confidentiality >= types.Confidential || + technicalAsset.Integrity >= types.Critical || + technicalAsset.Availability >= types.Critical { + impact = types.MediumImpact } // just for referencing the most interesting asset if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() { mostRelevantAsset = technicalAsset } } - if technicalAsset.Technology == model.BuildPipeline { + if technicalAsset.Technology == types.BuildPipeline { hasBuildPipeline = true } - if technicalAsset.Technology == model.SourcecodeRepository { + if technicalAsset.Technology 
== types.SourcecodeRepository { hasSourcecodeRepo = true } - if technicalAsset.Technology == model.DevOpsClient { + if technicalAsset.Technology == types.DevOpsClient { hasDevOpsClient = true } } hasBuildInfrastructure := hasBuildPipeline && hasSourcecodeRepo && hasDevOpsClient if hasCustomDevelopedParts && !hasBuildInfrastructure { - risks = append(risks, createRisk(mostRelevantAsset, impact)) + risks = append(risks, r.createRisk(mostRelevantAsset, impact)) } return risks } -func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk { +func (r *MissingBuildInfrastructureRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk { title := "Missing Build Infrastructure in the threat model (referencing asset " + technicalAsset.Title + " as an example)" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go b/pkg/security/risks/builtin/missing-cloud-hardening-rule.go similarity index 57% rename from risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go rename to pkg/security/risks/builtin/missing-cloud-hardening-rule.go index e7dddb3a..b6f6b06a 100644 --- a/risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go +++ 
b/pkg/security/risks/builtin/missing-cloud-hardening-rule.go @@ -1,12 +1,19 @@ -package missing_cloud_hardening +package builtin import ( - "github.com/threagile/threagile/model" "sort" + + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingCloudHardeningRule struct{} + +func NewMissingCloudHardeningRule() *MissingCloudHardeningRule { + return &MissingCloudHardeningRule{} +} + +func (*MissingCloudHardeningRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "missing-cloud-hardening", Title: "Missing Cloud Hardening", Description: "Cloud components should be hardened according to the cloud vendor best practices. This affects their " + @@ -25,10 +32,10 @@ func Category() model.RiskCategory { "

For Google Cloud Platform: Follow the CIS Benchmark for Google Cloud Computing Platform (see also the automated checks of cloud audit tools like \"CloudSploit\" or \"ScoutSuite\"). " + "

For Oracle Cloud Platform: Follow the hardening best practices (see also the automated checks of cloud audit tools like \"CloudSploit\").", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.Tampering, + Function: types.Operations, + STRIDE: types.Tampering, DetectionLogic: "In-scope cloud components (either residing in cloud trust boundaries or more specifically tagged with cloud provider types).", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Cloud components not running parts of the target architecture can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -36,59 +43,59 @@ func Category() model.RiskCategory { } } -var specificSubtagsAWS = []string{"aws:vpc", "aws:ec2", "aws:s3", "aws:ebs", "aws:apigateway", "aws:lambda", "aws:dynamodb", "aws:rds", "aws:sqs", "aws:iam"} +var specificSubTagsAWS = []string{"aws:vpc", "aws:ec2", "aws:s3", "aws:ebs", "aws:apigateway", "aws:lambda", "aws:dynamodb", "aws:rds", "aws:sqs", "aws:iam"} -func SupportedTags() []string { +func (*MissingCloudHardeningRule) SupportedTags() []string { res := []string{ "aws", // Amazon AWS "azure", // Microsoft Azure "gcp", // Google Cloud Platform "ocp", // Oracle Cloud Platform } - res = append(res, specificSubtagsAWS...) + res = append(res, specificSubTagsAWS...) 
return res } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) +func (r *MissingCloudHardeningRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) - sharedRuntimesWithUnspecificCloudRisks := make(map[string]bool, 0) - trustBoundariesWithUnspecificCloudRisks := make(map[string]bool, 0) - techAssetsWithUnspecificCloudRisks := make(map[string]bool, 0) + sharedRuntimesWithUnspecificCloudRisks := make(map[string]bool) + trustBoundariesWithUnspecificCloudRisks := make(map[string]bool) + techAssetsWithUnspecificCloudRisks := make(map[string]bool) - sharedRuntimeIDsAWS := make(map[string]bool, 0) - trustBoundaryIDsAWS := make(map[string]bool, 0) - techAssetIDsAWS := make(map[string]bool, 0) + sharedRuntimeIDsAWS := make(map[string]bool) + trustBoundaryIDsAWS := make(map[string]bool) + techAssetIDsAWS := make(map[string]bool) - sharedRuntimeIDsAzure := make(map[string]bool, 0) - trustBoundaryIDsAzure := make(map[string]bool, 0) - techAssetIDsAzure := make(map[string]bool, 0) + sharedRuntimeIDsAzure := make(map[string]bool) + trustBoundaryIDsAzure := make(map[string]bool) + techAssetIDsAzure := make(map[string]bool) - sharedRuntimeIDsGCP := make(map[string]bool, 0) - trustBoundaryIDsGCP := make(map[string]bool, 0) - techAssetIDsGCP := make(map[string]bool, 0) + sharedRuntimeIDsGCP := make(map[string]bool) + trustBoundaryIDsGCP := make(map[string]bool) + techAssetIDsGCP := make(map[string]bool) - sharedRuntimeIDsOCP := make(map[string]bool, 0) - trustBoundaryIDsOCP := make(map[string]bool, 0) - techAssetIDsOCP := make(map[string]bool, 0) + sharedRuntimeIDsOCP := make(map[string]bool) + trustBoundaryIDsOCP := make(map[string]bool) + techAssetIDsOCP := make(map[string]bool) - techAssetIDsWithSubtagSpecificCloudRisks := make(map[string]bool, 0) + techAssetIDsWithSubtagSpecificCloudRisks := make(map[string]bool) - for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries { - taggedOuterTB := 
trustBoundary.IsTaggedWithAny(SupportedTags()...) // false = generic cloud risks only // true = cloud-individual risks + for _, trustBoundary := range input.TrustBoundaries { + taggedOuterTB := trustBoundary.IsTaggedWithAny(r.SupportedTags()...) // false = generic cloud risks only // true = cloud-individual risks if taggedOuterTB || trustBoundary.Type.IsWithinCloud() { - addTrustBoundaryAccordingToBasetag(trustBoundary, trustBoundariesWithUnspecificCloudRisks, + r.addTrustBoundaryAccordingToBaseTag(trustBoundary, trustBoundariesWithUnspecificCloudRisks, trustBoundaryIDsAWS, trustBoundaryIDsAzure, trustBoundaryIDsGCP, trustBoundaryIDsOCP) - for _, techAssetID := range trustBoundary.RecursivelyAllTechnicalAssetIDsInside() { + for _, techAssetID := range trustBoundary.RecursivelyAllTechnicalAssetIDsInside(input) { added := false - tA := model.ParsedModelRoot.TechnicalAssets[techAssetID] - if tA.IsTaggedWithAny(SupportedTags()...) { - addAccordingToBasetag(tA, tA.Tags, + tA := input.TechnicalAssets[techAssetID] + if tA.IsTaggedWithAny(r.SupportedTags()...) { + addAccordingToBaseTag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) added = true } else if taggedOuterTB { - addAccordingToBasetag(tA, trustBoundary.Tags, + addAccordingToBaseTag(tA, trustBoundary.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) added = true @@ -101,31 +108,31 @@ func GenerateRisks() []model.Risk { } // now loop over all technical assets, trust boundaries, and shared runtimes model-wide by tag - for _, tA := range model.TechnicalAssetsTaggedWithAny(SupportedTags()...) { - addAccordingToBasetag(tA, tA.Tags, + for _, tA := range input.TechnicalAssetsTaggedWithAny(r.SupportedTags()...) 
{ + addAccordingToBaseTag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) } - for _, tB := range model.TrustBoundariesTaggedWithAny(SupportedTags()...) { - for _, candidateID := range tB.RecursivelyAllTechnicalAssetIDsInside() { - tA := model.ParsedModelRoot.TechnicalAssets[candidateID] - if tA.IsTaggedWithAny(SupportedTags()...) { - addAccordingToBasetag(tA, tA.Tags, + for _, tB := range input.TrustBoundariesTaggedWithAny(r.SupportedTags()...) { + for _, candidateID := range tB.RecursivelyAllTechnicalAssetIDsInside(input) { + tA := input.TechnicalAssets[candidateID] + if tA.IsTaggedWithAny(r.SupportedTags()...) { + addAccordingToBaseTag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) } else { - addAccordingToBasetag(tA, tB.Tags, + addAccordingToBaseTag(tA, tB.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) } } } - for _, sR := range model.SharedRuntimesTaggedWithAny(SupportedTags()...) { - addSharedRuntimeAccordingToBasetag(sR, sharedRuntimesWithUnspecificCloudRisks, + for _, sR := range input.SharedRuntimesTaggedWithAny(r.SupportedTags()...) { + r.addSharedRuntimeAccordingToBaseTag(sR, sharedRuntimesWithUnspecificCloudRisks, sharedRuntimeIDsAWS, sharedRuntimeIDsAzure, sharedRuntimeIDsGCP, sharedRuntimeIDsOCP) for _, candidateID := range sR.TechnicalAssetsRunning { - tA := model.ParsedModelRoot.TechnicalAssets[candidateID] - addAccordingToBasetag(tA, sR.Tags, + tA := input.TechnicalAssets[candidateID] + addAccordingToBaseTag(tA, sR.Tags, techAssetIDsWithSubtagSpecificCloudRisks, techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP) } @@ -181,87 +188,87 @@ func GenerateRisks() []model.Risk { // first try to add shared runtimes... 
for id := range sharedRuntimeIDsAWS { - risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "AWS", "CIS Benchmark for AWS")) + risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "AWS", "CIS Benchmark for AWS")) addedAWS = true } for id := range sharedRuntimeIDsAzure { - risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "Azure", "CIS Benchmark for Microsoft Azure")) + risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "Azure", "CIS Benchmark for Microsoft Azure")) addedAzure = true } for id := range sharedRuntimeIDsGCP { - risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform")) + risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform")) addedGCP = true } for id := range sharedRuntimeIDsOCP { - risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform")) + risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform")) addedOCP = true } for id := range sharedRuntimesWithUnspecificCloudRisks { - risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "", "")) + risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "", "")) } // ... 
followed by trust boundaries for the generic risks for id := range trustBoundaryIDsAWS { - risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "AWS", "CIS Benchmark for AWS")) + risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "AWS", "CIS Benchmark for AWS")) addedAWS = true } for id := range trustBoundaryIDsAzure { - risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "Azure", "CIS Benchmark for Microsoft Azure")) + risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "Azure", "CIS Benchmark for Microsoft Azure")) addedAzure = true } for id := range trustBoundaryIDsGCP { - risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform")) + risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform")) addedGCP = true } for id := range trustBoundaryIDsOCP { - risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform")) + risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform")) addedOCP = true } for id := range trustBoundariesWithUnspecificCloudRisks { - risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "", "")) + risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "", "")) } // just use the most sensitive asset as an example - to only create one general "AWS cloud hardening" risk, not many if !addedAWS { - mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAWS) + mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsAWS) if !mostRelevantAsset.IsZero() { - risks = 
append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "AWS", "CIS Benchmark for AWS")) + risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "AWS", "CIS Benchmark for AWS")) addedAWS = true } } // just use the most sensitive asset as an example - to only create one general "Azure cloud hardening" risk, not many if !addedAzure { - mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAzure) + mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsAzure) if !mostRelevantAsset.IsZero() { - risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "Azure", "CIS Benchmark for Microsoft Azure")) + risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "Azure", "CIS Benchmark for Microsoft Azure")) addedAzure = true } } // just use the most sensitive asset as an example - to only create one general "GCP cloud hardening" risk, not many if !addedGCP { - mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsGCP) + mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsGCP) if !mostRelevantAsset.IsZero() { - risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "GCP", "CIS Benchmark for Google Cloud Computing Platform")) + risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "GCP", "CIS Benchmark for Google Cloud Computing Platform")) addedGCP = true } } // just use the most sensitive asset as an example - to only create one general "GCP cloud hardening" risk, not many if !addedOCP { - mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsOCP) + mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsOCP) if !mostRelevantAsset.IsZero() { - risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "OCP", "Vendor Best Practices for Oracle Cloud Platform")) + risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "OCP", "Vendor Best Practices for Oracle Cloud 
Platform")) addedOCP = true } } // now also add all tech asset specific tag-specific risks, as they are specific to the asset anyway (therefore don't set added to true here) for id := range techAssetIDsWithSubtagSpecificCloudRisks { - tA := model.ParsedModelRoot.TechnicalAssets[id] - if tA.IsTaggedWithAnyTraversingUp("aws:ec2") { - risks = append(risks, createRiskForTechnicalAsset(tA, "EC2", "CIS Benchmark for Amazon Linux")) + tA := input.TechnicalAssets[id] + if tA.IsTaggedWithAnyTraversingUp(input, "aws:ec2") { + risks = append(risks, r.createRiskForTechnicalAsset(input, tA, "EC2", "CIS Benchmark for Amazon Linux")) } - if tA.IsTaggedWithAnyTraversingUp("aws:s3") { - risks = append(risks, createRiskForTechnicalAsset(tA, "S3", "Security Best Practices for AWS S3")) + if tA.IsTaggedWithAnyTraversingUp(input, "aws:s3") { + risks = append(risks, r.createRiskForTechnicalAsset(input, tA, "S3", "Security Best Practices for AWS S3")) } // TODO add more tag-specific risks like also for aws:lambda etc. here } @@ -269,13 +276,13 @@ func GenerateRisks() []model.Risk { return risks } -func addTrustBoundaryAccordingToBasetag(trustBoundary model.TrustBoundary, +func (r *MissingCloudHardeningRule) addTrustBoundaryAccordingToBaseTag(trustBoundary types.TrustBoundary, trustBoundariesWithUnspecificCloudRisks map[string]bool, trustBoundaryIDsAWS map[string]bool, trustBoundaryIDsAzure map[string]bool, trustBoundaryIDsGCP map[string]bool, trustBoundaryIDsOCP map[string]bool) { - if trustBoundary.IsTaggedWithAny(SupportedTags()...) { + if trustBoundary.IsTaggedWithAny(r.SupportedTags()...) 
{ if trustBoundary.IsTaggedWithBaseTag("aws") { trustBoundaryIDsAWS[trustBoundary.Id] = true } @@ -293,13 +300,13 @@ func addTrustBoundaryAccordingToBasetag(trustBoundary model.TrustBoundary, } } -func addSharedRuntimeAccordingToBasetag(sharedRuntime model.SharedRuntime, +func (r *MissingCloudHardeningRule) addSharedRuntimeAccordingToBaseTag(sharedRuntime types.SharedRuntime, sharedRuntimesWithUnspecificCloudRisks map[string]bool, sharedRuntimeIDsAWS map[string]bool, sharedRuntimeIDsAzure map[string]bool, sharedRuntimeIDsGCP map[string]bool, sharedRuntimeIDsOCP map[string]bool) { - if sharedRuntime.IsTaggedWithAny(SupportedTags()...) { + if sharedRuntime.IsTaggedWithAny(r.SupportedTags()...) { if sharedRuntime.IsTaggedWithBaseTag("aws") { sharedRuntimeIDsAWS[sharedRuntime.Id] = true } @@ -317,38 +324,38 @@ func addSharedRuntimeAccordingToBasetag(sharedRuntime model.SharedRuntime, } } -func addAccordingToBasetag(techAsset model.TechnicalAsset, tags []string, +func addAccordingToBaseTag(techAsset types.TechnicalAsset, tags []string, techAssetIDsWithTagSpecificCloudRisks map[string]bool, techAssetIDsAWS map[string]bool, techAssetIDsAzure map[string]bool, techAssetIDsGCP map[string]bool, techAssetIDsOCP map[string]bool) { - if techAsset.IsTaggedWithAny(specificSubtagsAWS...) { + if techAsset.IsTaggedWithAny(specificSubTagsAWS...) 
{ techAssetIDsWithTagSpecificCloudRisks[techAsset.Id] = true } - if model.IsTaggedWithBaseTag(tags, "aws") { + if types.IsTaggedWithBaseTag(tags, "aws") { techAssetIDsAWS[techAsset.Id] = true } - if model.IsTaggedWithBaseTag(tags, "azure") { + if types.IsTaggedWithBaseTag(tags, "azure") { techAssetIDsAzure[techAsset.Id] = true } - if model.IsTaggedWithBaseTag(tags, "gcp") { + if types.IsTaggedWithBaseTag(tags, "gcp") { techAssetIDsGCP[techAsset.Id] = true } - if model.IsTaggedWithBaseTag(tags, "ocp") { + if types.IsTaggedWithBaseTag(tags, "ocp") { techAssetIDsOCP[techAsset.Id] = true } } -func findMostSensitiveTechnicalAsset(techAssets map[string]bool) model.TechnicalAsset { - var mostRelevantAsset model.TechnicalAsset +func findMostSensitiveTechnicalAsset(input *types.ParsedModel, techAssets map[string]bool) types.TechnicalAsset { + var mostRelevantAsset types.TechnicalAsset keys := make([]string, 0, len(techAssets)) for k := range techAssets { keys = append(keys, k) } sort.Strings(keys) for _, id := range keys { - tA := model.ParsedModelRoot.TechnicalAssets[id] + tA := input.TechnicalAssets[id] if mostRelevantAsset.IsZero() || tA.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() { mostRelevantAsset = tA } @@ -356,7 +363,7 @@ func findMostSensitiveTechnicalAsset(techAssets map[string]bool) model.Technical return mostRelevantAsset } -func createRiskForSharedRuntime(sharedRuntime model.SharedRuntime, prefix, details string) model.Risk { +func (r *MissingCloudHardeningRule) createRiskForSharedRuntime(input *types.ParsedModel, sharedRuntime types.SharedRuntime, prefix, details string) types.Risk { if len(prefix) > 0 { prefix = " (" + prefix + ")" } @@ -364,33 +371,33 @@ func createRiskForSharedRuntime(sharedRuntime model.SharedRuntime, prefix, detai if len(details) > 0 { title += ": " + details + "" } - impact := model.MediumImpact - if sharedRuntime.HighestConfidentiality() >= model.Confidential || - sharedRuntime.HighestIntegrity() >= 
model.Critical || - sharedRuntime.HighestAvailability() >= model.Critical { - impact = model.HighImpact + impact := types.MediumImpact + if sharedRuntime.HighestConfidentiality(input) >= types.Confidential || + sharedRuntime.HighestIntegrity(input) >= types.Critical || + sharedRuntime.HighestAvailability(input) >= types.Critical { + impact = types.HighImpact } - if sharedRuntime.HighestConfidentiality() == model.StrictlyConfidential || - sharedRuntime.HighestIntegrity() == model.MissionCritical || - sharedRuntime.HighestAvailability() == model.MissionCritical { - impact = model.VeryHighImpact + if sharedRuntime.HighestConfidentiality(input) == types.StrictlyConfidential || + sharedRuntime.HighestIntegrity(input) == types.MissionCritical || + sharedRuntime.HighestAvailability(input) == types.MissionCritical { + impact = types.VeryHighImpact } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantSharedRuntimeId: sharedRuntime.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: sharedRuntime.TechnicalAssetsRunning, } - risk.SyntheticId = risk.Category.Id + "@" + sharedRuntime.Id + risk.SyntheticId = risk.CategoryId + "@" + sharedRuntime.Id return risk } -func createRiskForTrustBoundary(trustBoundary model.TrustBoundary, prefix, details string) model.Risk { +func (r *MissingCloudHardeningRule) createRiskForTrustBoundary(parsedModel *types.ParsedModel, trustBoundary types.TrustBoundary, prefix, details string) types.Risk { if len(prefix) > 0 { prefix = " (" + prefix + ")" } @@ -398,33 +405,33 @@ func createRiskForTrustBoundary(trustBoundary model.TrustBoundary, prefix, detai if len(details) > 
0 { title += ": " + details + "" } - impact := model.MediumImpact - if trustBoundary.HighestConfidentiality() >= model.Confidential || - trustBoundary.HighestIntegrity() >= model.Critical || - trustBoundary.HighestAvailability() >= model.Critical { - impact = model.HighImpact + impact := types.MediumImpact + if trustBoundary.HighestConfidentiality(parsedModel) >= types.Confidential || + trustBoundary.HighestIntegrity(parsedModel) >= types.Critical || + trustBoundary.HighestAvailability(parsedModel) >= types.Critical { + impact = types.HighImpact } - if trustBoundary.HighestConfidentiality() == model.StrictlyConfidential || - trustBoundary.HighestIntegrity() == model.MissionCritical || - trustBoundary.HighestAvailability() == model.MissionCritical { - impact = model.VeryHighImpact + if trustBoundary.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + trustBoundary.HighestIntegrity(parsedModel) == types.MissionCritical || + trustBoundary.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.VeryHighImpact } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTrustBoundaryId: trustBoundary.Id, - DataBreachProbability: model.Probable, - DataBreachTechnicalAssetIDs: trustBoundary.RecursivelyAllTechnicalAssetIDsInside(), + DataBreachProbability: types.Probable, + DataBreachTechnicalAssetIDs: trustBoundary.RecursivelyAllTechnicalAssetIDsInside(parsedModel), } - risk.SyntheticId = risk.Category.Id + "@" + trustBoundary.Id + risk.SyntheticId = risk.CategoryId + "@" + trustBoundary.Id return risk } -func createRiskForTechnicalAsset(technicalAsset model.TechnicalAsset, prefix, details string) model.Risk { +func (r 
*MissingCloudHardeningRule) createRiskForTechnicalAsset(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, prefix, details string) types.Risk { if len(prefix) > 0 { prefix = " (" + prefix + ")" } @@ -432,28 +439,28 @@ func createRiskForTechnicalAsset(technicalAsset model.TechnicalAsset, prefix, de if len(details) > 0 { title += ": " + details + "" } - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical || - technicalAsset.HighestAvailability() >= model.Critical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(parsedModel) >= types.Confidential || + technicalAsset.HighestIntegrity(parsedModel) >= types.Critical || + technicalAsset.HighestAvailability(parsedModel) >= types.Critical { + impact = types.HighImpact } - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.VeryHighImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.VeryHighImpact } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = 
risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/missing-file-validation/missing-file-validation-rule.go b/pkg/security/risks/builtin/missing-file-validation-rule.go similarity index 55% rename from risks/built-in/missing-file-validation/missing-file-validation-rule.go rename to pkg/security/risks/builtin/missing-file-validation-rule.go index c8633038..2a7c388d 100644 --- a/risks/built-in/missing-file-validation/missing-file-validation-rule.go +++ b/pkg/security/risks/builtin/missing-file-validation-rule.go @@ -1,11 +1,17 @@ -package missing_file_validation +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingFileValidationRule struct{} + +func NewMissingFileValidationRule() *MissingFileValidationRule { + return &MissingFileValidationRule{} +} + +func (*MissingFileValidationRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "missing-file-validation", Title: "Missing File Validation", Description: "When a technical asset accepts files, these input files should be strictly validated about filename and type.", @@ -15,14 +21,14 @@ func Category() model.RiskCategory { Action: "File Validation", Mitigation: "Filter by file extension and discard (if feasible) the name provided. Whitelist the accepted file types " + "and determine the mime-type on the server-side (for example via \"Apache Tika\" or similar checks). 
If the file is retrievable by " + - "endusers and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they " + + "end users and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they " + "were uploaded, also apply a fresh malware scan during retrieval to scan with newer signatures of popular malware). Also enforce " + "limits on maximum file size to avoid denial-of-service like scenarios.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Spoofing, + Function: types.Development, + STRIDE: types.Spoofing, DetectionLogic: "In-scope technical assets with custom-developed code accepting file data formats.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Fully trusted (i.e. 
cryptographically signed or similar) files can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -30,44 +36,44 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MissingFileValidationRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *MissingFileValidationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if technicalAsset.OutOfScope || !technicalAsset.CustomDevelopedParts { continue } for _, format := range technicalAsset.DataFormatsAccepted { - if format == model.File { - risks = append(risks, createRisk(technicalAsset)) + if format == types.File { + risks = append(risks, r.createRisk(input, technicalAsset)) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *MissingFileValidationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Missing File Validation risk at " + technicalAsset.Title + "" - impact := model.LowImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.MediumImpact + impact := types.LowImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(input) == types.MissionCritical || + technicalAsset.HighestAvailability(input) == types.MissionCritical { + impact = types.MediumImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.VeryLikely, impact), - 
ExploitationLikelihood: model.VeryLikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.VeryLikely, impact), + ExploitationLikelihood: types.VeryLikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/pkg/security/risks/builtin/missing-hardening-rule.go b/pkg/security/risks/builtin/missing-hardening-rule.go new file mode 100644 index 00000000..60d2f6e5 --- /dev/null +++ b/pkg/security/risks/builtin/missing-hardening-rule.go @@ -0,0 +1,78 @@ +package builtin + +import ( + "strconv" + + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingHardeningRule struct { + raaLimit int + raaLimitReduced int +} + +func NewMissingHardeningRule() *MissingHardeningRule { + return &MissingHardeningRule{raaLimit: 55, raaLimitReduced: 40} +} + +func (r *MissingHardeningRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-hardening", + Title: "Missing Hardening", + Description: "Technical assets with a Relative Attacker Attractiveness (RAA) value of " + strconv.Itoa(r.raaLimit) + " % or higher should be " + + "explicitly hardened taking best practices and vendor hardening guides into account.", + Impact: "If this risk remains unmitigated, attackers might be able to easier attack high-value targets.", + ASVS: "V14 - Configuration Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + Action: "System Hardening", + Mitigation: "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor " + + "recommendations, DevSec Hardening Framework, DBSAT for 
Oracle databases, and others).", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Operations, + STRIDE: types.Tampering, + DetectionLogic: "In-scope technical assets with RAA values of " + strconv.Itoa(r.raaLimit) + " % or higher. " + + "Generally for high-value targets like data stores, application servers, identity providers and ERP systems this limit is reduced to " + strconv.Itoa(r.raaLimitReduced) + " %", + RiskAssessment: "The risk rating depends on the sensitivity of the data processed in the technical asset.", + FalsePositives: "Usually no false positives.", + ModelFailurePossibleReason: false, + CWE: 16, + } +} + +func (*MissingHardeningRule) SupportedTags() []string { + return []string{"tomcat"} +} + +func (r *MissingHardeningRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope { + if technicalAsset.RAA >= float64(r.raaLimit) || (technicalAsset.RAA >= float64(r.raaLimitReduced) && + (technicalAsset.Type == types.Datastore || technicalAsset.Technology == types.ApplicationServer || technicalAsset.Technology == types.IdentityProvider || technicalAsset.Technology == types.ERP)) { + risks = append(risks, r.createRisk(input, technicalAsset)) + } + } + } + return risks +} + +func (r *MissingHardeningRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { + title := "Missing Hardening risk at " + technicalAsset.Title + "" + impact := types.LowImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical { + impact = types.MediumImpact + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Likely, impact), + ExploitationLikelihood: types.Likely, + 
ExploitationImpact: impact, + Title: title, + MostRelevantTechnicalAssetId: technicalAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + return risk +} diff --git a/pkg/security/risks/builtin/missing-identity-propagation-rule.go b/pkg/security/risks/builtin/missing-identity-propagation-rule.go new file mode 100644 index 00000000..03be30d1 --- /dev/null +++ b/pkg/security/risks/builtin/missing-identity-propagation-rule.go @@ -0,0 +1,106 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingIdentityPropagationRule struct{} + +func NewMissingIdentityPropagationRule() *MissingIdentityPropagationRule { + return &MissingIdentityPropagationRule{} +} + +func (*MissingIdentityPropagationRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-identity-propagation", + Title: "Missing Identity Propagation", + Description: "Technical assets (especially multi-tenant systems), which usually process data for end users should " + + "authorize every request based on the identity of the end user when the data flow is authenticated (i.e. non-public). " + + "For DevOps usages at least a technical-user authorization is required.", + Impact: "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within " + + "the system due to missing resource-based authorization checks.", + ASVS: "V4 - Access Control Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", + Action: "Identity Propagation and Resource-based Authorization", + Mitigation: "When processing requests for end users if possible authorize in the backend against the propagated " + + "identity of the end user. 
This can be achieved in passing JWTs or similar tokens and checking them in the backend " + + "services. For DevOps usages apply at least a technical-user authorization.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope service-like technical assets which usually process data based on end user requests, if authenticated " + + "(i.e. non-public), should authorize incoming requests based on the propagated end user identity when their rating is sensitive. " + + "This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). " + + "DevOps usages are exempted from this risk.", + RiskAssessment: "The risk rating (medium or high) " + + "depends on the confidentiality, integrity, and availability rating of the technical asset.", + FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + + "can be considered as false positives after individual review.", + ModelFailurePossibleReason: false, + CWE: 284, + } +} + +func (*MissingIdentityPropagationRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingIdentityPropagationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.OutOfScope { + continue + } + if technicalAsset.Technology.IsUsuallyProcessingEndUserRequests() && + (technicalAsset.Confidentiality >= types.Confidential || + technicalAsset.Integrity >= types.Critical || + technicalAsset.Availability >= types.Critical || + (technicalAsset.MultiTenant && + (technicalAsset.Confidentiality >= types.Restricted || + technicalAsset.Integrity >= types.Important || + technicalAsset.Availability >= types.Important))) { + // check each incoming 
authenticated data flow + commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + for _, commLink := range commLinks { + caller := input.TechnicalAssets[commLink.SourceId] + if !caller.Technology.IsUsuallyAbleToPropagateIdentityToOutgoingTargets() || caller.Type == types.Datastore { + continue + } + if commLink.Authentication != types.NoneAuthentication && + commLink.Authorization != types.EndUserIdentityPropagation { + if commLink.Usage == types.DevOps && commLink.Authorization != types.NoneAuthorization { + continue + } + highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == types.MissionCritical || + technicalAsset.Availability == types.MissionCritical + risks = append(risks, r.createRisk(input, technicalAsset, commLink, highRisk)) + } + } + } + } + return risks +} + +func (r *MissingIdentityPropagationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink, moreRisky bool) types.Risk { + impact := types.LowImpact + if moreRisky { + impact = types.MediumImpact + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: "Missing End User Identity Propagation over communication link " + incomingAccess.Title + " " + + "from " + input.TechnicalAssets[incomingAccess.SourceId].Title + " " + + "to " + technicalAsset.Title + "", + MostRelevantTechnicalAssetId: technicalAsset.Id, + MostRelevantCommunicationLinkId: incomingAccess.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + incomingAccess.Id + "@" + input.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id + return risk +} diff --git 
a/risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go b/pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go similarity index 54% rename from risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go rename to pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go index acd44afd..1d47be95 100644 --- a/risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go +++ b/pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go @@ -1,29 +1,35 @@ -package missing_identity_provider_isolation +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingIdentityProviderIsolationRule struct{} + +func NewMissingIdentityProviderIsolationRule() *MissingIdentityProviderIsolationRule { + return &MissingIdentityProviderIsolationRule{} +} + +func (*MissingIdentityProviderIsolationRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "missing-identity-provider-isolation", Title: "Missing Identity Provider Isolation", - Description: "Highly sensitive identity provider assets and their identity datastores should be isolated from other assets " + - "by their own network segmentation trust-boundary (" + model.ExecutionEnvironment.String() + " boundaries do not count as network isolation).", + Description: "Highly sensitive identity provider assets and their identity data stores should be isolated from other assets " + + "by their own network segmentation trust-boundary (" + types.ExecutionEnvironment.String() + " boundaries do not count as network isolation).", Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " + - "highly sensitive identity provider assets and their identity datastores, 
as they are not separated by network segmentation.", + "highly sensitive identity provider assets and their identity data stores, as they are not separated by network segmentation.", ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", Action: "Network Segmentation", - Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity datastores.", + Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity data stores.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope identity provider assets and their identity datastores " + + Function: types.Operations, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope identity provider assets and their identity data stores " + "when surrounded by other (not identity-related) assets (without a network trust-boundary in-between). " + "This risk is especially prevalent when other non-identity related assets are within the same execution environment (i.e. same database or same application server).", - RiskAssessment: "Default is " + model.HighImpact.String() + " impact. The impact is increased to " + model.VeryHighImpact.String() + " when the asset missing the " + - "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".", + RiskAssessment: "Default is " + types.HighImpact.String() + " impact. 
The impact is increased to " + types.VeryHighImpact.String() + " when the asset missing the " + + "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".", FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " + "identity providers with data of highest sensitivity.", ModelFailurePossibleReason: false, @@ -31,63 +37,63 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MissingIdentityProviderIsolationRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { +func (r *MissingIdentityProviderIsolationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { if !technicalAsset.OutOfScope && technicalAsset.Technology.IsIdentityRelated() { - moreImpact := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical || - technicalAsset.Availability == model.MissionCritical + moreImpact := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == types.MissionCritical || + technicalAsset.Availability == types.MissionCritical sameExecutionEnv := false createRiskEntry := false // now check for any other same-network assets of non-identity-related types - for sparringAssetCandidateId, _ := range model.ParsedModelRoot.TechnicalAssets { // so inner loop again over all assets + for sparringAssetCandidateId := range input.TechnicalAssets { // so inner loop again over all assets if technicalAsset.Id != sparringAssetCandidateId { - sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId] + sparringAssetCandidate := 
input.TechnicalAssets[sparringAssetCandidateId] if !sparringAssetCandidate.Technology.IsIdentityRelated() && !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() { - if technicalAsset.IsSameExecutionEnvironment(sparringAssetCandidateId) { + if technicalAsset.IsSameExecutionEnvironment(input, sparringAssetCandidateId) { createRiskEntry = true sameExecutionEnv = true - } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) { + } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) { createRiskEntry = true } } } } if createRiskEntry { - risks = append(risks, createRisk(technicalAsset, moreImpact, sameExecutionEnv)) + risks = append(risks, r.createRisk(technicalAsset, moreImpact, sameExecutionEnv)) } } } return risks } -func createRisk(techAsset model.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) model.Risk { - impact := model.HighImpact - likelihood := model.Unlikely +func (r *MissingIdentityProviderIsolationRule) createRisk(techAsset types.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) types.Risk { + impact := types.HighImpact + likelihood := types.Unlikely others := "in the same network segment" if moreImpact { - impact = model.VeryHighImpact + impact = types.VeryHighImpact } if sameExecutionEnv { - likelihood = model.Likely + likelihood = types.Likely others = "in the same execution environment" } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: "Missing Identity Provider Isolation to further encapsulate and protect identity-related asset " + techAsset.Title + " against unrelated " + "lower protected assets " + others + ", which might be easier to compromise by attackers", MostRelevantTechnicalAssetId: techAsset.Id, - 
DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{techAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id return risk } diff --git a/pkg/security/risks/builtin/missing-identity-store-rule.go b/pkg/security/risks/builtin/missing-identity-store-rule.go new file mode 100644 index 00000000..9bdb1d2d --- /dev/null +++ b/pkg/security/risks/builtin/missing-identity-store-rule.go @@ -0,0 +1,101 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingIdentityStoreRule struct{} + +func NewMissingIdentityStoreRule() *MissingIdentityStoreRule { + return &MissingIdentityStoreRule{} +} + +func (*MissingIdentityStoreRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-identity-store", + Title: "Missing Identity Store", + Description: "The modeled architecture does not contain an identity store, which might be the risk of a model missing " + + "critical assets (and thus not seeing their risks).", + Impact: "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store " + + "that is currently missing in the model.", + ASVS: "V2 - Authentication Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", + Action: "Identity Store", + Mitigation: "Include an identity store in the model if the application has a login.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.Spoofing, + DetectionLogic: "Models with authenticated data-flows authorized via end user identity missing an in-scope identity store.", + RiskAssessment: "The risk rating depends on the sensitivity of the end user-identity authorized technical assets and " + + "their data 
assets processed.", + FalsePositives: "Models only offering data/services without any real authentication need " + + "can be considered as false positives after individual review.", + ModelFailurePossibleReason: true, + CWE: 287, + } +} + +func (*MissingIdentityStoreRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingIdentityStoreRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { + if !technicalAsset.OutOfScope && + (technicalAsset.Technology == types.IdentityStoreLDAP || technicalAsset.Technology == types.IdentityStoreDatabase) { + // everything fine, no risk, as we have an in-scope identity store in the model + return risks + } + } + // now check if we have end user identity authorized communication links, then it's a risk + riskIdentified := false + var mostRelevantAsset types.TechnicalAsset + impact := types.LowImpact + for _, id := range input.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset + technicalAsset := input.TechnicalAssets[id] + for _, commLink := range technicalAsset.CommunicationLinksSorted() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset + if commLink.Authorization == types.EndUserIdentityPropagation { + riskIdentified = true + targetAsset := input.TechnicalAssets[commLink.TargetId] + if impact == types.LowImpact { + mostRelevantAsset = targetAsset + if targetAsset.HighestConfidentiality(input) >= types.Confidential || + targetAsset.HighestIntegrity(input) >= types.Critical || + targetAsset.HighestAvailability(input) >= types.Critical { + impact = types.MediumImpact + } + } + if targetAsset.Confidentiality >= types.Confidential || + targetAsset.Integrity >= types.Critical || + targetAsset.Availability >= types.Critical { + impact = types.MediumImpact + } + // just for referencing the most 
interesting asset + if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() { + mostRelevantAsset = technicalAsset + } + } + } + } + if riskIdentified { + risks = append(risks, r.createRisk(mostRelevantAsset, impact)) + } + return risks +} + +func (r *MissingIdentityStoreRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk { + title := "Missing Identity Store in the threat model (referencing asset " + technicalAsset.Title + " as an example)" + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: title, + MostRelevantTechnicalAssetId: technicalAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{}, + } + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + return risk +} diff --git a/pkg/security/risks/builtin/missing-network-segmentation-rule.go b/pkg/security/risks/builtin/missing-network-segmentation-rule.go new file mode 100644 index 00000000..659d01bf --- /dev/null +++ b/pkg/security/risks/builtin/missing-network-segmentation-rule.go @@ -0,0 +1,104 @@ +package builtin + +import ( + "sort" + + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingNetworkSegmentationRule struct { + raaLimit int +} + +func NewMissingNetworkSegmentationRule() *MissingNetworkSegmentationRule { + return &MissingNetworkSegmentationRule{raaLimit: 50} +} + +func (*MissingNetworkSegmentationRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "missing-network-segmentation", + Title: "Missing Network Segmentation", + Description: "Highly sensitive assets and/or data stores residing in the same network segment than other " + + "lower sensitive assets (like webservers or content management systems etc.) 
should be better protected " + + "by a network segmentation trust-boundary.", + Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " + + "more valuable targets, as they are not separated by network segmentation.", + ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + Action: "Network Segmentation", + Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive assets and/or data stores.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Operations, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope technical assets with high sensitivity and RAA values as well as data stores " + + "when surrounded by assets (without a network trust-boundary in-between) which are of type " + types.ClientSystem.String() + ", " + + types.WebServer.String() + ", " + types.WebApplication.String() + ", " + types.CMS.String() + ", " + types.WebServiceREST.String() + ", " + types.WebServiceSOAP.String() + ", " + + types.BuildPipeline.String() + ", " + types.SourcecodeRepository.String() + ", " + types.Monitoring.String() + ", or similar and there is no direct connection between these " + + "(hence no requirement to be so close to each other).", + RiskAssessment: "Default is " + types.LowSeverity.String() + " risk. 
The risk is increased to " + types.MediumSeverity.String() + " when the asset missing the " + + "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".", + FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " + + "containing/processing highly sensitive data.", + ModelFailurePossibleReason: false, + CWE: 1008, + } +} + +func (*MissingNetworkSegmentationRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingNetworkSegmentationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order + // range over them in sorted (hence re-producible) way: + keys := make([]string, 0) + for k := range input.TechnicalAssets { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + technicalAsset := input.TechnicalAssets[key] + if !technicalAsset.OutOfScope && technicalAsset.Technology != types.ReverseProxy && technicalAsset.Technology != types.WAF && technicalAsset.Technology != types.IDS && technicalAsset.Technology != types.IPS && technicalAsset.Technology != types.ServiceRegistry { + if technicalAsset.RAA >= float64(r.raaLimit) && (technicalAsset.Type == types.Datastore || technicalAsset.Confidentiality >= types.Confidential || + technicalAsset.Integrity >= types.Critical || technicalAsset.Availability >= types.Critical) { + // now check for any other same-network assets of certain types which have no direct connection + for _, sparringAssetCandidateId := range keys { // so inner loop again over all assets + if technicalAsset.Id != sparringAssetCandidateId { + sparringAssetCandidate := input.TechnicalAssets[sparringAssetCandidateId] + if sparringAssetCandidate.Technology.IsLessProtectedType() && + 
technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) && + !technicalAsset.HasDirectConnection(input, sparringAssetCandidateId) && + !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() { + highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == types.MissionCritical || technicalAsset.Availability == types.MissionCritical + risks = append(risks, r.createRisk(technicalAsset, highRisk)) + break + } + } + } + } + } + } + return risks +} + +func (r *MissingNetworkSegmentationRule) createRisk(techAsset types.TechnicalAsset, moreRisky bool) types.Risk { + impact := types.LowImpact + if moreRisky { + impact = types.MediumImpact + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: "Missing Network Segmentation to further encapsulate and protect " + techAsset.Title + " against unrelated " + + "lower protected assets in the same network segment, which might be easier to compromise by attackers", + MostRelevantTechnicalAssetId: techAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{techAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id + return risk +} diff --git a/pkg/security/risks/builtin/missing-vault-isolation-rule.go b/pkg/security/risks/builtin/missing-vault-isolation-rule.go new file mode 100644 index 00000000..7a9d6497 --- /dev/null +++ b/pkg/security/risks/builtin/missing-vault-isolation-rule.go @@ -0,0 +1,103 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type MissingVaultIsolationRule struct{} + +func NewMissingVaultIsolationRule() *MissingVaultIsolationRule { + return &MissingVaultIsolationRule{} +} + +func (*MissingVaultIsolationRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: 
"missing-vault-isolation", + Title: "Missing Vault Isolation", + Description: "Highly sensitive vault assets and their data stores should be isolated from other assets " + + "by their own network segmentation trust-boundary (" + types.ExecutionEnvironment.String() + " boundaries do not count as network isolation).", + Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " + + "highly sensitive vault assets and their data stores, as they are not separated by network segmentation.", + ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + Action: "Network Segmentation", + Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their data stores.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Operations, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope vault assets " + + "when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). " + + "This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).", + RiskAssessment: "Default is " + types.MediumImpact.String() + " impact. 
The impact is increased to " + types.HighImpact.String() + " when the asset missing the " + + "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".", + FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " + + "vaults with data of highest sensitivity.", + ModelFailurePossibleReason: false, + CWE: 1008, + } +} + +func (*MissingVaultIsolationRule) SupportedTags() []string { + return []string{} +} + +func (r *MissingVaultIsolationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { + if !technicalAsset.OutOfScope && technicalAsset.Technology == types.Vault { + moreImpact := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == types.MissionCritical || + technicalAsset.Availability == types.MissionCritical + sameExecutionEnv := false + createRiskEntry := false + // now check for any other same-network assets of non-vault-related types + for sparringAssetCandidateId := range input.TechnicalAssets { // so inner loop again over all assets + if technicalAsset.Id != sparringAssetCandidateId { + sparringAssetCandidate := input.TechnicalAssets[sparringAssetCandidateId] + if sparringAssetCandidate.Technology != types.Vault && !isVaultStorage(input, technicalAsset, sparringAssetCandidate) { + if technicalAsset.IsSameExecutionEnvironment(input, sparringAssetCandidateId) { + createRiskEntry = true + sameExecutionEnv = true + } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) { + createRiskEntry = true + } + } + } + } + if createRiskEntry { + risks = append(risks, r.createRisk(technicalAsset, moreImpact, sameExecutionEnv)) + } + } + } + return risks +} + +func isVaultStorage(parsedModel *types.ParsedModel, vault types.TechnicalAsset, storage 
types.TechnicalAsset) bool { + return storage.Type == types.Datastore && vault.HasDirectConnection(parsedModel, storage.Id) +} + +func (r *MissingVaultIsolationRule) createRisk(techAsset types.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) types.Risk { + impact := types.MediumImpact + likelihood := types.Unlikely + others := "in the same network segment" + if moreImpact { + impact = types.HighImpact + } + if sameExecutionEnv { + likelihood = types.Likely + others = "in the same execution environment" + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), + ExploitationLikelihood: likelihood, + ExploitationImpact: impact, + Title: "Missing Vault Isolation to further encapsulate and protect vault-related asset " + techAsset.Title + " against unrelated " + + "lower protected assets " + others + ", which might be easier to compromise by attackers", + MostRelevantTechnicalAssetId: techAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{techAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id + return risk +} diff --git a/risks/built-in/missing-vault/missing-vault-rule.go b/pkg/security/risks/builtin/missing-vault-rule.go similarity index 55% rename from risks/built-in/missing-vault/missing-vault-rule.go rename to pkg/security/risks/builtin/missing-vault-rule.go index a046131b..61e8e1c1 100644 --- a/risks/built-in/missing-vault/missing-vault-rule.go +++ b/pkg/security/risks/builtin/missing-vault-rule.go @@ -1,11 +1,17 @@ -package missing_vault +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingVaultRule struct{} + +func NewMissingVaultRule() *MissingVaultRule { + return &MissingVaultRule{} +} + +func (*MissingVaultRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: 
"missing-vault", Title: "Missing Vault (Secret Storage)", Description: "In order to avoid the risk of secret leakage via config files (when attacked through vulnerabilities being able to " + @@ -19,10 +25,10 @@ func Category() model.RiskCategory { Action: "Vault (Secret Storage)", Mitigation: "Consider using a Vault (Secret Storage) to securely store and access config secrets (like credentials, private keys, client certificates, etc.).", Check: "Is a Vault (Secret Storage) in place?", - Function: model.Architecture, - STRIDE: model.InformationDisclosure, + Function: types.Architecture, + STRIDE: types.InformationDisclosure, DetectionLogic: "Models without a Vault (Secret Storage).", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Models where no technical assets have any kind of sensitive config data to protect " + "can be considered as false positives after individual review.", ModelFailurePossibleReason: true, @@ -30,29 +36,29 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MissingVaultRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) +func (r *MissingVaultRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) hasVault := false - var mostRelevantAsset model.TechnicalAsset - impact := model.LowImpact - for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset - techAsset := model.ParsedModelRoot.TechnicalAssets[id] - if techAsset.Technology == model.Vault { + var mostRelevantAsset types.TechnicalAsset + impact := types.LowImpact + for _, id := range input.SortedTechnicalAssetIDs() { // use the 
sorted one to always get the same tech asset with the highest sensitivity as example asset + techAsset := input.TechnicalAssets[id] + if techAsset.Technology == types.Vault { hasVault = true } - if techAsset.HighestConfidentiality() >= model.Confidential || - techAsset.HighestIntegrity() >= model.Critical || - techAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact + if techAsset.HighestConfidentiality(input) >= types.Confidential || + techAsset.HighestIntegrity(input) >= types.Critical || + techAsset.HighestAvailability(input) >= types.Critical { + impact = types.MediumImpact } - if techAsset.Confidentiality >= model.Confidential || - techAsset.Integrity >= model.Critical || - techAsset.Availability >= model.Critical { - impact = model.MediumImpact + if techAsset.Confidentiality >= types.Confidential || + techAsset.Integrity >= types.Critical || + techAsset.Availability >= types.Critical { + impact = types.MediumImpact } // just for referencing the most interesting asset if techAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() { @@ -60,23 +66,23 @@ func GenerateRisks() []model.Risk { } } if !hasVault { - risks = append(risks, createRisk(mostRelevantAsset, impact)) + risks = append(risks, r.createRisk(mostRelevantAsset, impact)) } return risks } -func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk { +func (r *MissingVaultRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk { title := "Missing Vault (Secret Storage) in the threat model (referencing asset " + technicalAsset.Title + " as an example)" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: 
impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/missing-waf/missing-waf-rule.go b/pkg/security/risks/builtin/missing-waf-rule.go similarity index 60% rename from risks/built-in/missing-waf/missing-waf-rule.go rename to pkg/security/risks/builtin/missing-waf-rule.go index 684cc9af..f2c1132b 100644 --- a/risks/built-in/missing-waf/missing-waf-rule.go +++ b/pkg/security/risks/builtin/missing-waf-rule.go @@ -1,11 +1,17 @@ -package missing_waf +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MissingWafRule struct{} + +func NewMissingWafRule() *MissingWafRule { + return &MissingWafRule{} +} + +func (*MissingWafRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "missing-waf", Title: "Missing Web Application Firewall (WAF)", Description: "To have a first line of filtering defense, security architectures with web-services or web-applications should include a WAF in front of them. " + @@ -18,10 +24,10 @@ func Category() model.RiskCategory { Mitigation: "Consider placing a Web Application Firewall (WAF) in front of the web-services and/or web-applications. For cloud environments many cloud providers offer " + "pre-configured WAFs. 
Even reverse proxies can be enhances by a WAF component via ModSecurity plugins.", Check: "Is a Web Application Firewall (WAF) in place?", - Function: model.Operations, - STRIDE: model.Tampering, + Function: types.Operations, + STRIDE: types.Tampering, DetectionLogic: "In-scope web-services and/or web-applications accessed across a network trust boundary not having a Web Application Firewall (WAF) in front of them.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Targets only accessible via WAFs or reverse proxies containing a WAF component (like ModSecurity) can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -29,20 +35,20 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MissingWafRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { +func (r *MissingWafRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { if !technicalAsset.OutOfScope && (technicalAsset.Technology.IsWebApplication() || technicalAsset.Technology.IsWebService()) { - for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { - if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() && + for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { + if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) && incomingAccess.Protocol.IsPotentialWebAccessProtocol() && - model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Technology != 
model.WAF { - risks = append(risks, createRisk(technicalAsset)) + input.TechnicalAssets[incomingAccess.SourceId].Technology != types.WAF { + risks = append(risks, r.createRisk(input, technicalAsset)) break } } @@ -51,25 +57,25 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *MissingWafRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Missing Web Application Firewall (WAF) risk at " + technicalAsset.Title + "" - likelihood := model.Unlikely - impact := model.LowImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.MediumImpact + likelihood := types.Unlikely + impact := types.LowImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(input) == types.MissionCritical || + technicalAsset.HighestAvailability(input) == types.MissionCritical { + impact = types.MediumImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go b/pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go similarity index 63% rename from 
risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go rename to pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go index 3c9ab06b..24f07b16 100644 --- a/risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go +++ b/pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go @@ -1,12 +1,19 @@ -package mixed_targets_on_shared_runtime +package builtin import ( - "github.com/threagile/threagile/model" "sort" + + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type MixedTargetsOnSharedRuntimeRule struct{} + +func NewMixedTargetsOnSharedRuntimeRule() *MixedTargetsOnSharedRuntimeRule { + return &MixedTargetsOnSharedRuntimeRule{} +} + +func (*MixedTargetsOnSharedRuntimeRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "mixed-targets-on-shared-runtime", Title: "Mixed Targets on Shared Runtime", Description: "Different attacker targets (like frontend and backend/datastore components) should not be running on the same " + @@ -20,8 +27,8 @@ func Category() model.RiskCategory { "prevent load- or breach-related problems originating from one more attacker-facing asset impacts also the " + "other more critical rated backend/datastore assets.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.ElevationOfPrivilege, + Function: types.Operations, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: "Shared runtime running technical assets of different trust-boundaries is at risk. 
" + "Also mixing backend/datastore with frontend components on the same shared runtime is considered a risk.", RiskAssessment: "The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of " + @@ -33,31 +40,31 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*MixedTargetsOnSharedRuntimeRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) +func (r *MixedTargetsOnSharedRuntimeRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) // as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: keys := make([]string, 0) - for k, _ := range model.ParsedModelRoot.SharedRuntimes { + for k := range input.SharedRuntimes { keys = append(keys, k) } sort.Strings(keys) for _, key := range keys { - sharedRuntime := model.ParsedModelRoot.SharedRuntimes[key] + sharedRuntime := input.SharedRuntimes[key] currentTrustBoundaryId := "" hasFrontend, hasBackend := false, false riskAdded := false for _, technicalAssetId := range sharedRuntime.TechnicalAssetsRunning { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[technicalAssetId] - if len(currentTrustBoundaryId) > 0 && currentTrustBoundaryId != technicalAsset.GetTrustBoundaryId() { - risks = append(risks, createRisk(sharedRuntime)) + technicalAsset := input.TechnicalAssets[technicalAssetId] + if len(currentTrustBoundaryId) > 0 && currentTrustBoundaryId != technicalAsset.GetTrustBoundaryId(input) { + risks = append(risks, r.createRisk(input, sharedRuntime)) riskAdded = true break } - currentTrustBoundaryId = technicalAsset.GetTrustBoundaryId() + currentTrustBoundaryId = technicalAsset.GetTrustBoundaryId(input) if technicalAsset.Technology.IsExclusivelyFrontendRelated() { hasFrontend = true } @@ -66,37 +73,37 @@ func GenerateRisks() []model.Risk { } } if !riskAdded && hasFrontend && hasBackend { - risks = 
append(risks, createRisk(sharedRuntime)) + risks = append(risks, r.createRisk(input, sharedRuntime)) } } return risks } -func createRisk(sharedRuntime model.SharedRuntime) model.Risk { - impact := model.LowImpact - if isMoreRisky(sharedRuntime) { - impact = model.MediumImpact +func (r *MixedTargetsOnSharedRuntimeRule) createRisk(input *types.ParsedModel, sharedRuntime types.SharedRuntime) types.Risk { + impact := types.LowImpact + if isMoreRisky(input, sharedRuntime) { + impact = types.MediumImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: "Mixed Targets on Shared Runtime named " + sharedRuntime.Title + " might enable attackers moving from one less " + "valuable target to a more valuable one", // TODO list at least the assets in the text which are running on the shared HW MostRelevantSharedRuntimeId: sharedRuntime.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: sharedRuntime.TechnicalAssetsRunning, } - risk.SyntheticId = risk.Category.Id + "@" + sharedRuntime.Id + risk.SyntheticId = risk.CategoryId + "@" + sharedRuntime.Id return risk } -func isMoreRisky(sharedRuntime model.SharedRuntime) bool { +func isMoreRisky(input *types.ParsedModel, sharedRuntime types.SharedRuntime) bool { for _, techAssetId := range sharedRuntime.TechnicalAssetsRunning { - techAsset := model.ParsedModelRoot.TechnicalAssets[techAssetId] - if techAsset.Confidentiality == model.StrictlyConfidential || techAsset.Integrity == model.MissionCritical || - techAsset.Availability == model.MissionCritical { + techAsset := input.TechnicalAssets[techAssetId] + if techAsset.Confidentiality == types.StrictlyConfidential || 
techAsset.Integrity == types.MissionCritical || + techAsset.Availability == types.MissionCritical { return true } } diff --git a/risks/built-in/path-traversal/path-traversal-rule.go b/pkg/security/risks/builtin/path-traversal-rule.go similarity index 55% rename from risks/built-in/path-traversal/path-traversal-rule.go rename to pkg/security/risks/builtin/path-traversal-rule.go index 1258c039..061b1dfb 100644 --- a/risks/built-in/path-traversal/path-traversal-rule.go +++ b/pkg/security/risks/builtin/path-traversal-rule.go @@ -1,15 +1,21 @@ -package path_traversal +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type PathTraversalRule struct{} + +func NewPathTraversalRule() *PathTraversalRule { + return &PathTraversalRule{} +} + +func (*PathTraversalRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "path-traversal", Title: "Path-Traversal", Description: "When a filesystem is accessed Path-Traversal or Local-File-Inclusion (LFI) risks might arise. " + - "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.", + "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", Impact: "If this risk is unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, " + "business data files, etc.) from the filesystem of affected components.", ASVS: "V12 - File and Resources Verification Requirements", @@ -20,8 +26,8 @@ func Category() model.RiskCategory { "(partly or fully) provided by the caller. 
" + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.InformationDisclosure, + Function: types.Development, + STRIDE: types.InformationDisclosure, DetectionLogic: "Filesystems accessed by in-scope callers.", RiskAssessment: "The risk rating depends on the sensitivity of the data stored inside the technical asset.", FalsePositives: "File accesses by filenames not consisting of parts controllable by the caller can be considered " + @@ -31,51 +37,51 @@ func Category() model.RiskCategory { } } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.Technology != model.FileServer && technicalAsset.Technology != model.LocalFileSystem { +func (*PathTraversalRule) SupportedTags() []string { + return []string{} +} + +func (r *PathTraversalRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.Technology != types.FileServer && technicalAsset.Technology != types.LocalFileSystem { continue } - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] for _, incomingFlow := range incomingFlows { - if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope { + if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope { continue } - likelihood := model.VeryLikely - if incomingFlow.Usage == model.DevOps { - likelihood = model.Likely + likelihood := types.VeryLikely + if 
incomingFlow.Usage == types.DevOps { + likelihood = types.Likely } - risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood)) + risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood)) } } return risks } -func SupportedTags() []string { - return []string{} -} - -func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk { - caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] +func (r *PathTraversalRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk { + caller := input.TechnicalAssets[incomingFlow.SourceId] title := "Path-Traversal risk at " + caller.Title + " against filesystem " + technicalAsset.Title + "" + " via " + incomingFlow.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical { + impact = types.HighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: caller.Id, MostRelevantCommunicationLinkId: incomingFlow.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id + risk.SyntheticId = 
risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id return risk } diff --git a/risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go b/pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go similarity index 57% rename from risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go rename to pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go index d965ce18..6320ec5a 100644 --- a/risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go +++ b/pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go @@ -1,11 +1,17 @@ -package push_instead_of_pull_deployment +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type PushInsteadPullDeploymentRule struct{} + +func NewPushInsteadPullDeploymentRule() *PushInsteadPullDeploymentRule { + return &PushInsteadPullDeploymentRule{} +} + +func (*PushInsteadPullDeploymentRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "push-instead-of-pull-deployment", Title: "Push instead of Pull Deployment", Description: "When comparing push-based vs. 
pull-based deployments from a security perspective, pull-based " + @@ -19,8 +25,8 @@ func Category() model.RiskCategory { Action: "Build Pipeline Hardening", Mitigation: "Try to prefer pull-based deployments (like GitOps scenarios offer) over push-based deployments to reduce the attack surface of the production system.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Tampering, + Function: types.Architecture, + STRIDE: types.Tampering, DetectionLogic: "Models with build pipeline components accessing in-scope targets of deployment (in a non-readonly way) which " + "are not build-related components themselves.", RiskAssessment: "The risk rating depends on the highest sensitivity of the deployment targets running custom-developed parts.", @@ -31,25 +37,25 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*PushInsteadPullDeploymentRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - impact := model.LowImpact - for _, buildPipeline := range model.ParsedModelRoot.TechnicalAssets { - if buildPipeline.Technology == model.BuildPipeline { +func (r *PushInsteadPullDeploymentRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + impact := types.LowImpact + for _, buildPipeline := range input.TechnicalAssets { + if buildPipeline.Technology == types.BuildPipeline { for _, deploymentLink := range buildPipeline.CommunicationLinks { - targetAsset := model.ParsedModelRoot.TechnicalAssets[deploymentLink.TargetId] - if !deploymentLink.Readonly && deploymentLink.Usage == model.DevOps && - !targetAsset.OutOfScope && !targetAsset.Technology.IsDevelopmentRelevant() && targetAsset.Usage == model.Business { - if targetAsset.HighestConfidentiality() >= model.Confidential || - targetAsset.HighestIntegrity() >= model.Critical || - 
targetAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact + targetAsset := input.TechnicalAssets[deploymentLink.TargetId] + if !deploymentLink.Readonly && deploymentLink.Usage == types.DevOps && + !targetAsset.OutOfScope && !targetAsset.Technology.IsDevelopmentRelevant() && targetAsset.Usage == types.Business { + if targetAsset.HighestConfidentiality(input) >= types.Confidential || + targetAsset.HighestIntegrity(input) >= types.Critical || + targetAsset.HighestAvailability(input) >= types.Critical { + impact = types.MediumImpact } - risks = append(risks, createRisk(buildPipeline, targetAsset, deploymentLink, impact)) + risks = append(risks, r.createRisk(buildPipeline, targetAsset, deploymentLink, impact)) } } } @@ -57,19 +63,19 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(buildPipeline model.TechnicalAsset, deploymentTarget model.TechnicalAsset, deploymentCommLink model.CommunicationLink, impact model.RiskExploitationImpact) model.Risk { +func (r *PushInsteadPullDeploymentRule) createRisk(buildPipeline types.TechnicalAsset, deploymentTarget types.TechnicalAsset, deploymentCommLink types.CommunicationLink, impact types.RiskExploitationImpact) types.Risk { title := "Push instead of Pull Deployment at " + deploymentTarget.Title + " via build pipeline asset " + buildPipeline.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: deploymentTarget.Id, MostRelevantCommunicationLinkId: deploymentCommLink.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{deploymentTarget.Id}, } - risk.SyntheticId = risk.Category.Id + 
"@" + buildPipeline.Id + risk.SyntheticId = risk.CategoryId + "@" + buildPipeline.Id return risk } diff --git a/risks/built-in/search-query-injection/search-query-injection-rule.go b/pkg/security/risks/builtin/search-query-injection-rule.go similarity index 55% rename from risks/built-in/search-query-injection/search-query-injection-rule.go rename to pkg/security/risks/builtin/search-query-injection-rule.go index 936ab6e6..749df40e 100644 --- a/risks/built-in/search-query-injection/search-query-injection-rule.go +++ b/pkg/security/risks/builtin/search-query-injection-rule.go @@ -1,11 +1,17 @@ -package search_query_injection +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type SearchQueryInjectionRule struct{} + +func NewSearchQueryInjectionRule() *SearchQueryInjectionRule { + return &SearchQueryInjectionRule{} +} + +func (*SearchQueryInjectionRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "search-query-injection", Title: "Search-Query Injection", Description: "When a search engine server is accessed Search-Query Injection risks might arise." + @@ -21,10 +27,10 @@ func Category() model.RiskCategory { "query unfiltered to the caller. 
" + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Tampering, + Function: types.Development, + STRIDE: types.Tampering, DetectionLogic: "In-scope clients accessing search engine servers via typical search access protocols.", - RiskAssessment: "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed or stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed.", FalsePositives: "Server engine queries by search values not consisting of parts controllable by the caller can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -32,23 +38,27 @@ func Category() model.RiskCategory { } } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.Technology == model.SearchEngine || technicalAsset.Technology == model.SearchIndex { - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] +func (*SearchQueryInjectionRule) SupportedTags() []string { + return []string{} +} + +func (r *SearchQueryInjectionRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.Technology == types.SearchEngine || technicalAsset.Technology == types.SearchIndex { + incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] for _, incomingFlow := range incomingFlows { - if 
model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope { + if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope { continue } - if incomingFlow.Protocol == model.HTTP || incomingFlow.Protocol == model.HTTPS || - incomingFlow.Protocol == model.BINARY || incomingFlow.Protocol == model.BINARY_encrypted { - likelihood := model.VeryLikely - if incomingFlow.Usage == model.DevOps { - likelihood = model.Likely + if incomingFlow.Protocol == types.HTTP || incomingFlow.Protocol == types.HTTPS || + incomingFlow.Protocol == types.BINARY || incomingFlow.Protocol == types.BinaryEncrypted { + likelihood := types.VeryLikely + if incomingFlow.Usage == types.DevOps { + likelihood = types.Likely } - risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood)) + risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood)) } } } @@ -56,31 +66,27 @@ func GenerateRisks() []model.Risk { return risks } -func SupportedTags() []string { - return []string{} -} - -func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk { - caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] +func (r *SearchQueryInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk { + caller := input.TechnicalAssets[incomingFlow.SourceId] title := "Search Query Injection risk at " + caller.Title + " against search engine server " + technicalAsset.Title + "" + " via " + incomingFlow.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.HighImpact - } else if technicalAsset.HighestConfidentiality() <= model.Internal && technicalAsset.HighestIntegrity() == model.Operational { - impact = 
model.LowImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical { + impact = types.HighImpact + } else if technicalAsset.HighestConfidentiality(input) <= types.Internal && technicalAsset.HighestIntegrity(input) == types.Operational { + impact = types.LowImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: caller.Id, MostRelevantCommunicationLinkId: incomingFlow.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id + risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id return risk } diff --git a/risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go b/pkg/security/risks/builtin/server-side-request-forgery-rule.go similarity index 62% rename from risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go rename to pkg/security/risks/builtin/server-side-request-forgery-rule.go index 901c5fcf..45c3ce27 100644 --- a/risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go +++ b/pkg/security/risks/builtin/server-side-request-forgery-rule.go @@ -1,11 +1,17 @@ -package server_side_request_forgery +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type ServerSideRequestForgeryRule struct{} + +func 
NewServerSideRequestForgeryRule() *ServerSideRequestForgeryRule { + return &ServerSideRequestForgeryRule{} +} + +func (*ServerSideRequestForgeryRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "server-side-request-forgery", Title: "Server-Side Request Forgery (SSRF)", Description: "When a server system (i.e. not a client) is accessing other server systems via typical web protocols " + @@ -18,8 +24,8 @@ func Category() model.RiskCategory { "controllable values. " + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.InformationDisclosure, + Function: types.Development, + STRIDE: types.InformationDisclosure, DetectionLogic: "In-scope non-client systems accessing (using outgoing communication links) targets with either HTTP or HTTPS protocol.", RiskAssessment: "The risk rating (low or medium) depends on the sensitivity of the data assets receivable via web protocols from " + "targets within the same network trust-boundary as well on the sensitivity of the data assets receivable via web protocols from the target asset itself. 
" + @@ -31,73 +37,73 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*ServerSideRequestForgeryRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.OutOfScope || technicalAsset.Technology.IsClient() || technicalAsset.Technology == model.LoadBalancer { +func (r *ServerSideRequestForgeryRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if technicalAsset.OutOfScope || technicalAsset.Technology.IsClient() || technicalAsset.Technology == types.LoadBalancer { continue } for _, outgoingFlow := range technicalAsset.CommunicationLinks { if outgoingFlow.Protocol.IsPotentialWebAccessProtocol() { - risks = append(risks, createRisk(technicalAsset, outgoingFlow)) + risks = append(risks, r.createRisk(input, technicalAsset, outgoingFlow)) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset, outgoingFlow model.CommunicationLink) model.Risk { - target := model.ParsedModelRoot.TechnicalAssets[outgoingFlow.TargetId] +func (r *ServerSideRequestForgeryRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, outgoingFlow types.CommunicationLink) types.Risk { + target := input.TechnicalAssets[outgoingFlow.TargetId] title := "Server-Side Request Forgery (SSRF) risk at " + technicalAsset.Title + " server-side web-requesting " + "the target " + target.Title + " via " + outgoingFlow.Title + "" - impact := model.LowImpact + impact := types.LowImpact // check by the target itself (can be in another trust-boundary) - if target.HighestConfidentiality() == model.StrictlyConfidential { - impact = model.MediumImpact + if target.HighestConfidentiality(input) == 
types.StrictlyConfidential { + impact = types.MediumImpact } // check all potential attack targets within the same trust boundary (accessible via web protocols) uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{}) uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true - for _, potentialTargetAsset := range model.ParsedModelRoot.TechnicalAssets { - if technicalAsset.IsSameTrustBoundaryNetworkOnly(potentialTargetAsset.Id) { - for _, commLinkIncoming := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[potentialTargetAsset.Id] { + for _, potentialTargetAsset := range input.TechnicalAssets { + if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, potentialTargetAsset.Id) { + for _, commLinkIncoming := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[potentialTargetAsset.Id] { if commLinkIncoming.Protocol.IsPotentialWebAccessProtocol() { uniqueDataBreachTechnicalAssetIDs[potentialTargetAsset.Id] = true - if potentialTargetAsset.HighestConfidentiality() == model.StrictlyConfidential { - impact = model.MediumImpact + if potentialTargetAsset.HighestConfidentiality(input) == types.StrictlyConfidential { + impact = types.MediumImpact } } } } } // adjust for cloud-based special risks - if impact == model.LowImpact && model.ParsedModelRoot.TrustBoundaries[technicalAsset.GetTrustBoundaryId()].Type.IsWithinCloud() { - impact = model.MediumImpact + if impact == types.LowImpact && input.TrustBoundaries[technicalAsset.GetTrustBoundaryId(input)].Type.IsWithinCloud() { + impact = types.MediumImpact } dataBreachTechnicalAssetIDs := make([]string, 0) - for key, _ := range uniqueDataBreachTechnicalAssetIDs { + for key := range uniqueDataBreachTechnicalAssetIDs { dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key) } - likelihood := model.Likely - if outgoingFlow.Usage == model.DevOps { - likelihood = model.Unlikely + likelihood := types.Likely + if outgoingFlow.Usage == types.DevOps { + likelihood = types.Unlikely } - 
risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: outgoingFlow.Id, - DataBreachProbability: model.Possible, + DataBreachProbability: types.Possible, DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + target.Id + "@" + outgoingFlow.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + target.Id + "@" + outgoingFlow.Id return risk } diff --git a/pkg/security/risks/builtin/service-registry-poisoning-rule.go b/pkg/security/risks/builtin/service-registry-poisoning-rule.go new file mode 100644 index 00000000..94be965c --- /dev/null +++ b/pkg/security/risks/builtin/service-registry-poisoning-rule.go @@ -0,0 +1,79 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type ServiceRegistryPoisoningRule struct{} + +func NewServiceRegistryPoisoningRule() *ServiceRegistryPoisoningRule { + return &ServiceRegistryPoisoningRule{} +} + +func (*ServiceRegistryPoisoningRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "service-registry-poisoning", + Title: "Service Registry Poisoning", + Description: "When a service registry used for discovery of trusted service endpoints Service Registry Poisoning risks might arise.", + Impact: "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or " + + "malicious lookup and config data leading to breach of sensitive data.", + ASVS: "V10 - Malicious Code Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", + Action: "Service Registry 
Integrity Check", + Mitigation: "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.Spoofing, + DetectionLogic: "In-scope service registries.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical assets accessing the service registry " + + "as well as the data assets processed.", + FalsePositives: "Service registries not used for service discovery " + + "can be considered as false positives after individual review.", + ModelFailurePossibleReason: false, + CWE: 693, + } +} + +func (*ServiceRegistryPoisoningRule) SupportedTags() []string { + return []string{} +} + +func (r *ServiceRegistryPoisoningRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope && technicalAsset.Technology == types.ServiceRegistry { + incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + risks = append(risks, r.createRisk(input, technicalAsset, incomingFlows)) + } + } + return risks +} + +func (r *ServiceRegistryPoisoningRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlows []types.CommunicationLink) types.Risk { + title := "Service Registry Poisoning risk at " + technicalAsset.Title + "" + impact := types.LowImpact + + for _, incomingFlow := range incomingFlows { + caller := input.TechnicalAssets[incomingFlow.SourceId] + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical || technicalAsset.HighestAvailability(input) == types.MissionCritical || + caller.HighestConfidentiality(input) == 
types.StrictlyConfidential || caller.HighestIntegrity(input) == types.MissionCritical || caller.HighestAvailability(input) == types.MissionCritical || + incomingFlow.HighestConfidentiality(input) == types.StrictlyConfidential || incomingFlow.HighestIntegrity(input) == types.MissionCritical || incomingFlow.HighestAvailability(input) == types.MissionCritical { + impact = types.MediumImpact + break + } + } + + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: title, + MostRelevantTechnicalAssetId: technicalAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: find all service-lookup-using tech assets, which then might use spoofed lookups? + } + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + return risk +} diff --git a/risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go b/pkg/security/risks/builtin/sql-nosql-injection-rule.go similarity index 56% rename from risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go rename to pkg/security/risks/builtin/sql-nosql-injection-rule.go index 331f54e8..f2684333 100644 --- a/risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go +++ b/pkg/security/risks/builtin/sql-nosql-injection-rule.go @@ -1,15 +1,21 @@ -package sql_nosql_injection +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type SqlNoSqlInjectionRule struct{} + +func NewSqlNoSqlInjectionRule() *SqlNoSqlInjectionRule { + return &SqlNoSqlInjectionRule{} +} + +func (*SqlNoSqlInjectionRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "sql-nosql-injection", Title: "SQL/NoSQL-Injection", Description: "When a database is accessed via database access protocols 
SQL/NoSQL-Injection risks might arise. " + - "The risk rating depends on the sensitivity technical asset itself and of the data assets processed or stored.", + "The risk rating depends on the sensitivity technical asset itself and of the data assets processed.", Impact: "If this risk is unmitigated, attackers might be able to modify SQL/NoSQL queries to steal and modify data and eventually further escalate towards a deeper system penetration via code executions.", ASVS: "V5 - Validation, Sanitization and Encoding Verification Requirements", CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html", @@ -17,8 +23,8 @@ func Category() model.RiskCategory { Mitigation: "Try to use parameter binding to be safe from injection vulnerabilities. " + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.Tampering, + Function: types.Development, + STRIDE: types.Tampering, DetectionLogic: "Database accessed via typical database access protocols by in-scope clients.", RiskAssessment: "The risk rating depends on the sensitivity of the data stored inside the database.", FalsePositives: "Database accesses by queries not consisting of parts controllable by the caller can be considered " + @@ -28,51 +34,51 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*SqlNoSqlInjectionRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] +func (r *SqlNoSqlInjectionRule) GenerateRisks(input 
*types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] for _, incomingFlow := range incomingFlows { - if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope { + if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope { continue } - if incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(true) && (technicalAsset.Technology == model.Database || technicalAsset.Technology == model.IdentityStoreDatabase) || + if incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(true) && (technicalAsset.Technology == types.Database || technicalAsset.Technology == types.IdentityStoreDatabase) || (incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(false)) { - risks = append(risks, createRisk(technicalAsset, incomingFlow)) + risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow)) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink) model.Risk { - caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] +func (r *SqlNoSqlInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink) types.Risk { + caller := input.TechnicalAssets[incomingFlow.SourceId] title := "SQL/NoSQL-Injection risk at " + caller.Title + " against database " + technicalAsset.Title + "" + " via " + incomingFlow.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical { + impact = types.HighImpact } - 
likelihood := model.VeryLikely - if incomingFlow.Usage == model.DevOps { - likelihood = model.Likely + likelihood := types.VeryLikely + if incomingFlow.Usage == types.DevOps { + likelihood = types.Likely } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: caller.Id, MostRelevantCommunicationLinkId: incomingFlow.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id + risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id return risk } diff --git a/risks/built-in/unchecked-deployment/unchecked-deployment-rule.go b/pkg/security/risks/builtin/unchecked-deployment-rule.go similarity index 64% rename from risks/built-in/unchecked-deployment/unchecked-deployment-rule.go rename to pkg/security/risks/builtin/unchecked-deployment-rule.go index 34baf45f..1f864ab0 100644 --- a/risks/built-in/unchecked-deployment/unchecked-deployment-rule.go +++ b/pkg/security/risks/builtin/unchecked-deployment-rule.go @@ -1,11 +1,17 @@ -package unchecked_deployment +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UncheckedDeploymentRule struct{} + +func NewUncheckedDeploymentRule() *UncheckedDeploymentRule { + return &UncheckedDeploymentRule{} +} + +func (*UncheckedDeploymentRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unchecked-deployment", Title: "Unchecked Deployment", Description: "For each build-pipeline 
component Unchecked Deployment risks might arise when the build-pipeline " + @@ -20,8 +26,8 @@ func Category() model.RiskCategory { Mitigation: "Apply DevSecOps best-practices and use scanning tools to identify vulnerabilities in source- or byte-code," + "dependencies, container layers, and optionally also via dynamic scans against running test systems.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Tampering, + Function: types.Architecture, + STRIDE: types.Tampering, DetectionLogic: "All development-relevant technical assets.", RiskAssessment: "The risk rating depends on the highest rating of the technical assets and data assets processed by deployment-receiving targets.", FalsePositives: "When the build-pipeline does not build any software components it can be considered a false positive " + @@ -31,39 +37,39 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*UncheckedDeploymentRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { +func (r *UncheckedDeploymentRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { if technicalAsset.Technology.IsDevelopmentRelevant() { - risks = append(risks, createRisk(technicalAsset)) + risks = append(risks, r.createRisk(input, technicalAsset)) } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *UncheckedDeploymentRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "Unchecked Deployment risk at " + technicalAsset.Title + "" // impact is depending on highest rating - impact := model.LowImpact + impact := types.LowImpact // data breach at all deployment targets 
uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{}) uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true for _, codeDeploymentTargetCommLink := range technicalAsset.CommunicationLinks { - if codeDeploymentTargetCommLink.Usage == model.DevOps { + if codeDeploymentTargetCommLink.Usage == types.DevOps { for _, dataAssetID := range codeDeploymentTargetCommLink.DataAssetsSent { // it appears to be code when elevated integrity rating of sent data asset - if model.ParsedModelRoot.DataAssets[dataAssetID].Integrity >= model.Important { + if input.DataAssets[dataAssetID].Integrity >= types.Important { // here we've got a deployment target which has its data assets at risk via deployment of backdoored code uniqueDataBreachTechnicalAssetIDs[codeDeploymentTargetCommLink.TargetId] = true - targetTechAsset := model.ParsedModelRoot.TechnicalAssets[codeDeploymentTargetCommLink.TargetId] - if targetTechAsset.HighestConfidentiality() >= model.Confidential || - targetTechAsset.HighestIntegrity() >= model.Critical || - targetTechAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact + targetTechAsset := input.TechnicalAssets[codeDeploymentTargetCommLink.TargetId] + if targetTechAsset.HighestConfidentiality(input) >= types.Confidential || + targetTechAsset.HighestIntegrity(input) >= types.Critical || + targetTechAsset.HighestAvailability(input) >= types.Critical { + impact = types.MediumImpact } break } @@ -71,20 +77,20 @@ func createRisk(technicalAsset model.TechnicalAsset) model.Risk { } } dataBreachTechnicalAssetIDs := make([]string, 0) - for key, _ := range uniqueDataBreachTechnicalAssetIDs { + for key := range uniqueDataBreachTechnicalAssetIDs { dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key) } // create risk - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + 
Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Possible, + DataBreachProbability: types.Possible, DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/pkg/security/risks/builtin/unencrypted-asset-rule.go b/pkg/security/risks/builtin/unencrypted-asset-rule.go new file mode 100644 index 00000000..3b5d64dc --- /dev/null +++ b/pkg/security/risks/builtin/unencrypted-asset-rule.go @@ -0,0 +1,98 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type UnencryptedAssetRule struct{} + +func NewUnencryptedAssetRule() *UnencryptedAssetRule { + return &UnencryptedAssetRule{} +} + +func (*UnencryptedAssetRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "unencrypted-asset", + Title: "Unencrypted Technical Assets", + Description: "Due to the confidentiality rating of the technical asset itself and/or the processed data assets " + + "this technical asset must be encrypted. 
The risk rating depends on the sensitivity technical asset itself and of the data assets stored.", + Impact: "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.", + ASVS: "V6 - Stored Cryptography Verification Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html", + Action: "Encryption of Technical Asset", + Mitigation: "Apply encryption to the technical asset.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Operations, + STRIDE: types.InformationDisclosure, + DetectionLogic: "In-scope unencrypted technical assets (excluding " + types.ReverseProxy.String() + + ", " + types.LoadBalancer.String() + ", " + types.WAF.String() + ", " + types.IDS.String() + + ", " + types.IPS.String() + " and embedded components like " + types.Library.String() + ") " + + "storing data assets rated at least as " + types.Confidential.String() + " or " + types.Critical.String() + ". 
" + + "For technical assets storing data assets rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + " the " + + "encryption must be of type " + types.DataWithEndUserIndividualKey.String() + ".", + // NOTE: the risk assesment does not only consider the CIs of the *stored* data-assets + RiskAssessment: "Depending on the confidentiality rating of the stored data-assets either medium or high risk.", + FalsePositives: "When all sensitive data stored within the asset is already fully encrypted on document or data level.", + ModelFailurePossibleReason: false, + CWE: 311, + } +} + +func (*UnencryptedAssetRule) SupportedTags() []string { + return []string{} +} + +// check for technical assets that should be encrypted due to their confidentiality + +func (r *UnencryptedAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope && !isEncryptionWaiver(technicalAsset) && + (technicalAsset.HighestConfidentiality(input) >= types.Confidential || + technicalAsset.HighestIntegrity(input) >= types.Critical) { + verySensitive := technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(input) == types.MissionCritical + requiresEndUserKey := verySensitive && technicalAsset.Technology.IsUsuallyStoringEndUserData() + if technicalAsset.Encryption == types.NoneEncryption { + impact := types.MediumImpact + if verySensitive { + impact = types.HighImpact + } + risks = append(risks, r.createRisk(technicalAsset, impact, requiresEndUserKey)) + } else if requiresEndUserKey && + (technicalAsset.Encryption == types.Transparent || technicalAsset.Encryption == types.DataWithSymmetricSharedKey || technicalAsset.Encryption == types.DataWithAsymmetricSharedKey) { + risks = append(risks, r.createRisk(technicalAsset, types.MediumImpact, 
requiresEndUserKey)) + } + } + } + return risks +} + +// Simple routing assets like 'Reverse Proxy' or 'Load Balancer' usually don't have their own storage and thus have no +// encryption requirement for the asset itself (though for the communication, but that's a different rule) + +func isEncryptionWaiver(asset types.TechnicalAsset) bool { + return asset.Technology == types.ReverseProxy || asset.Technology == types.LoadBalancer || + asset.Technology == types.WAF || asset.Technology == types.IDS || asset.Technology == types.IPS || + asset.Technology.IsEmbeddedComponent() +} + +func (r *UnencryptedAssetRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact, requiresEndUserKey bool) types.Risk { + title := "Unencrypted Technical Asset named " + technicalAsset.Title + "" + if requiresEndUserKey { + title += " missing end user individual encryption with " + types.DataWithEndUserIndividualKey.String() + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: impact, + Title: title, + MostRelevantTechnicalAssetId: technicalAsset.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + return risk +} diff --git a/risks/built-in/unencrypted-communication/unencrypted-communication-rule.go b/pkg/security/risks/builtin/unencrypted-communication-rule.go similarity index 52% rename from risks/built-in/unencrypted-communication/unencrypted-communication-rule.go rename to pkg/security/risks/builtin/unencrypted-communication-rule.go index 0718540b..aee4181c 100644 --- a/risks/built-in/unencrypted-communication/unencrypted-communication-rule.go +++ b/pkg/security/risks/builtin/unencrypted-communication-rule.go @@ -1,11 +1,17 @@ -package unencrypted_communication +package builtin import ( - 
"github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UnencryptedCommunicationRule struct{} + +func NewUnencryptedCommunicationRule() *UnencryptedCommunicationRule { + return &UnencryptedCommunicationRule{} +} + +func (*UnencryptedCommunicationRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unencrypted-communication", Title: "Unencrypted Communication", Description: "Due to the confidentiality and/or integrity rating of the data assets transferred over the " + @@ -16,9 +22,9 @@ func Category() model.RiskCategory { Action: "Encryption of Communication Links", Mitigation: "Apply transport layer encryption to the communication link.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.InformationDisclosure, - DetectionLogic: "Unencrypted technical communication links of in-scope technical assets (excluding " + model.Monitoring.String() + " traffic as well as " + model.LocalFileAccess.String() + " and " + model.InProcessLibraryCall.String() + ") " + + Function: types.Operations, + STRIDE: types.InformationDisclosure, + DetectionLogic: "Unencrypted technical communication links of in-scope technical assets (excluding " + types.Monitoring.String() + " traffic as well as " + types.LocalFileAccess.String() + " and " + types.InProcessLibraryCall.String() + ") " + "transferring sensitive data.", // TODO more detailed text required here RiskAssessment: "Depending on the confidentiality rating of the transferred data-assets either medium or high risk.", FalsePositives: "When all sensitive data sent over the communication link is already fully encrypted on document or data level. 
" + @@ -28,43 +34,44 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*UnencryptedCommunicationRule) SupportedTags() []string { return []string{} } // check for communication links that should be encrypted due to their confidentiality and/or integrity -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { + +func (r *UnencryptedCommunicationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, technicalAsset := range input.TechnicalAssets { for _, dataFlow := range technicalAsset.CommunicationLinks { - transferringAuthData := dataFlow.Authentication != model.NoneAuthentication - sourceAsset := model.ParsedModelRoot.TechnicalAssets[dataFlow.SourceId] - targetAsset := model.ParsedModelRoot.TechnicalAssets[dataFlow.TargetId] + transferringAuthData := dataFlow.Authentication != types.NoneAuthentication + sourceAsset := input.TechnicalAssets[dataFlow.SourceId] + targetAsset := input.TechnicalAssets[dataFlow.TargetId] if !technicalAsset.OutOfScope || !sourceAsset.OutOfScope { if !dataFlow.Protocol.IsEncrypted() && !dataFlow.Protocol.IsProcessLocal() && - !sourceAsset.Technology.IsUnprotectedCommsTolerated() && - !targetAsset.Technology.IsUnprotectedCommsTolerated() { + !sourceAsset.Technology.IsUnprotectedCommunicationsTolerated() && + !targetAsset.Technology.IsUnprotectedCommunicationsTolerated() { addedOne := false for _, sentDataAsset := range dataFlow.DataAssetsSent { - dataAsset := model.ParsedModelRoot.DataAssets[sentDataAsset] + dataAsset := input.DataAssets[sentDataAsset] if isHighSensitivity(dataAsset) || transferringAuthData { - risks = append(risks, createRisk(technicalAsset, dataFlow, true, transferringAuthData)) + risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, true, transferringAuthData)) addedOne = true break } else if !dataFlow.VPN && isMediumSensitivity(dataAsset) { 
- risks = append(risks, createRisk(technicalAsset, dataFlow, false, transferringAuthData)) + risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, false, transferringAuthData)) addedOne = true break } } if !addedOne { for _, receivedDataAsset := range dataFlow.DataAssetsReceived { - dataAsset := model.ParsedModelRoot.DataAssets[receivedDataAsset] + dataAsset := input.DataAssets[receivedDataAsset] if isHighSensitivity(dataAsset) || transferringAuthData { - risks = append(risks, createRisk(technicalAsset, dataFlow, true, transferringAuthData)) + risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, true, transferringAuthData)) break } else if !dataFlow.VPN && isMediumSensitivity(dataAsset) { - risks = append(risks, createRisk(technicalAsset, dataFlow, false, transferringAuthData)) + risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, false, transferringAuthData)) break } } @@ -76,43 +83,43 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset, dataFlow model.CommunicationLink, highRisk bool, transferringAuthData bool) model.Risk { - impact := model.MediumImpact +func (r *UnencryptedCommunicationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, dataFlow types.CommunicationLink, highRisk bool, transferringAuthData bool) types.Risk { + impact := types.MediumImpact if highRisk { - impact = model.HighImpact + impact = types.HighImpact } - target := model.ParsedModelRoot.TechnicalAssets[dataFlow.TargetId] + target := input.TechnicalAssets[dataFlow.TargetId] title := "Unencrypted Communication named " + dataFlow.Title + " between " + technicalAsset.Title + " and " + target.Title + "" if transferringAuthData { title += " transferring authentication data (like credentials, token, session-id, etc.)" } if dataFlow.VPN { title += " (even VPN-protected connections need to encrypt their data in-transit when confidentiality is " + - "rated " + 
model.StrictlyConfidential.String() + " or integrity is rated " + model.MissionCritical.String() + ")" + "rated " + types.StrictlyConfidential.String() + " or integrity is rated " + types.MissionCritical.String() + ")" } - likelihood := model.Unlikely - if dataFlow.IsAcrossTrustBoundaryNetworkOnly() { - likelihood = model.Likely + likelihood := types.Unlikely + if dataFlow.IsAcrossTrustBoundaryNetworkOnly(input) { + likelihood = types.Likely } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: dataFlow.Id, - DataBreachProbability: model.Possible, + DataBreachProbability: types.Possible, DataBreachTechnicalAssetIDs: []string{target.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + dataFlow.Id + "@" + technicalAsset.Id + "@" + target.Id + risk.SyntheticId = risk.CategoryId + "@" + dataFlow.Id + "@" + technicalAsset.Id + "@" + target.Id return risk } -func isHighSensitivity(dataAsset model.DataAsset) bool { - return dataAsset.Confidentiality == model.StrictlyConfidential || dataAsset.Integrity == model.MissionCritical +func isHighSensitivity(dataAsset types.DataAsset) bool { + return dataAsset.Confidentiality == types.StrictlyConfidential || dataAsset.Integrity == types.MissionCritical } -func isMediumSensitivity(dataAsset model.DataAsset) bool { - return dataAsset.Confidentiality == model.Confidential || dataAsset.Integrity == model.Critical +func isMediumSensitivity(dataAsset types.DataAsset) bool { + return dataAsset.Confidentiality == types.Confidential || dataAsset.Integrity == types.Critical } diff --git a/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go b/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go new 
file mode 100644 index 00000000..9e1ec39d --- /dev/null +++ b/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go @@ -0,0 +1,112 @@ +package builtin + +import ( + "sort" + + "github.com/threagile/threagile/pkg/security/types" +) + +type UnguardedAccessFromInternetRule struct{} + +func NewUnguardedAccessFromInternetRule() *UnguardedAccessFromInternetRule { + return &UnguardedAccessFromInternetRule{} +} + +func (*UnguardedAccessFromInternetRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "unguarded-access-from-internet", + Title: "Unguarded Access From Internet", + Description: "Internet-exposed assets must be guarded by a protecting service, application, " + + "or reverse-proxy.", + Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive systems without any hardening components in-between " + + "due to them being directly exposed on the internet.", + ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + Action: "Encapsulation of Technical Asset", + Mitigation: "Encapsulate the asset behind a guarding service, application, or reverse-proxy. " + + "For admin maintenance a bastion-host should be used as a jump-server. " + + "For file transfer a store-and-forward-host should be used as an indirect file exchange platform.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope technical assets (excluding " + types.LoadBalancer.String() + ") with confidentiality rating " + + "of " + types.Confidential.String() + " (or higher) or with integrity rating of " + types.Critical.String() + " (or higher) when " + + "accessed directly from the internet. 
All " + + types.WebServer.String() + ", " + types.WebApplication.String() + ", " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", and " + types.Gateway.String() + " assets are exempted from this risk when " + + "they do not consist of custom developed code and " + + "the data-flow only consists of HTTP or FTP protocols. Access from " + types.Monitoring.String() + " systems " + + "as well as VPN-protected connections are exempted.", + RiskAssessment: "The matching technical assets are at " + types.LowSeverity.String() + " risk. When either the " + + "confidentiality rating is " + types.StrictlyConfidential.String() + " or the integrity rating " + + "is " + types.MissionCritical.String() + ", the risk-rating is considered " + types.MediumSeverity.String() + ". " + + "For assets with RAA values higher than 40 % the risk-rating increases.", + FalsePositives: "When other means of filtering client requests are applied equivalent of " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", or " + types.Gateway.String() + " components.", + ModelFailurePossibleReason: false, + CWE: 501, + } +} + +func (*UnguardedAccessFromInternetRule) SupportedTags() []string { + return []string{} +} + +func (r *UnguardedAccessFromInternetRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope { + commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + sort.Sort(types.ByTechnicalCommunicationLinkIdSort(commLinks)) + for _, incomingAccess := range commLinks { + if technicalAsset.Technology != types.LoadBalancer { + if !technicalAsset.CustomDevelopedParts { + if (technicalAsset.Technology == types.WebServer || technicalAsset.Technology == types.WebApplication || technicalAsset.Technology == types.ReverseProxy || technicalAsset.Technology == types.WAF || 
technicalAsset.Technology == types.Gateway) && + (incomingAccess.Protocol == types.HTTP || incomingAccess.Protocol == types.HTTPS) { + continue + } + if technicalAsset.Technology == types.Gateway && + (incomingAccess.Protocol == types.FTP || incomingAccess.Protocol == types.FTPS || incomingAccess.Protocol == types.SFTP) { + continue + } + } + if input.TechnicalAssets[incomingAccess.SourceId].Technology == types.Monitoring || + incomingAccess.VPN { + continue + } + if technicalAsset.Confidentiality >= types.Confidential || technicalAsset.Integrity >= types.Critical { + sourceAsset := input.TechnicalAssets[incomingAccess.SourceId] + if sourceAsset.Internet { + highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == types.MissionCritical + risks = append(risks, r.createRisk(technicalAsset, incomingAccess, + input.TechnicalAssets[incomingAccess.SourceId], highRisk)) + } + } + } + } + } + } + return risks +} + +func (r *UnguardedAccessFromInternetRule) createRisk(dataStore types.TechnicalAsset, dataFlow types.CommunicationLink, + clientFromInternet types.TechnicalAsset, moreRisky bool) types.Risk { + impact := types.LowImpact + if moreRisky || dataStore.RAA > 40 { + impact = types.MediumImpact + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.VeryLikely, impact), + ExploitationLikelihood: types.VeryLikely, + ExploitationImpact: impact, + Title: "Unguarded Access from Internet of " + dataStore.Title + " by " + + clientFromInternet.Title + "" + " via " + dataFlow.Title + "", + MostRelevantTechnicalAssetId: dataStore.Id, + MostRelevantCommunicationLinkId: dataFlow.Id, + DataBreachProbability: types.Possible, + DataBreachTechnicalAssetIDs: []string{dataStore.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + dataStore.Id + "@" + clientFromInternet.Id + "@" + dataFlow.Id + return risk +} diff --git a/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go 
b/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go new file mode 100644 index 00000000..42e1d600 --- /dev/null +++ b/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go @@ -0,0 +1,121 @@ +package builtin + +import ( + "github.com/threagile/threagile/pkg/security/types" +) + +type UnguardedDirectDatastoreAccessRule struct{} + +func NewUnguardedDirectDatastoreAccessRule() *UnguardedDirectDatastoreAccessRule { + return &UnguardedDirectDatastoreAccessRule{} +} + +func (*UnguardedDirectDatastoreAccessRule) Category() types.RiskCategory { + return types.RiskCategory{ + Id: "unguarded-direct-datastore-access", + Title: "Unguarded Direct Datastore Access", + Description: "Data stores accessed across trust boundaries must be guarded by some protecting service or application.", + Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive data stores without any protecting components in-between.", + ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", + CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + Action: "Encapsulation of Datastore", + Mitigation: "Encapsulate the datastore access behind a guarding service or application.", + Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "In-scope technical assets of type " + types.Datastore.String() + " (except " + types.IdentityStoreLDAP.String() + " when accessed from " + types.IdentityProvider.String() + " and " + types.FileServer.String() + " when accessed via file transfer protocols) with confidentiality rating " + + "of " + types.Confidential.String() + " (or higher) or with integrity rating of " + types.Critical.String() + " (or higher) " + + "which have incoming data-flows from assets outside across a network trust-boundary. 
DevOps config and deployment access is excluded from this risk.", // TODO new rule "missing bastion host"? + RiskAssessment: "The matching technical assets are at " + types.LowSeverity.String() + " risk. When either the " + + "confidentiality rating is " + types.StrictlyConfidential.String() + " or the integrity rating " + + "is " + types.MissionCritical.String() + ", the risk-rating is considered " + types.MediumSeverity.String() + ". " + + "For assets with RAA values higher than 40 % the risk-rating increases.", + FalsePositives: "When the caller is considered fully trusted as if it was part of the datastore itself.", + ModelFailurePossibleReason: false, + CWE: 501, + } +} + +func (*UnguardedDirectDatastoreAccessRule) SupportedTags() []string { + return []string{} +} + +// check for data stores that should not be accessed directly across trust boundaries + +func (r *UnguardedDirectDatastoreAccessRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] + if !technicalAsset.OutOfScope && technicalAsset.Type == types.Datastore { + for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { + sourceAsset := input.TechnicalAssets[incomingAccess.SourceId] + if (technicalAsset.Technology == types.IdentityStoreLDAP || technicalAsset.Technology == types.IdentityStoreDatabase) && + sourceAsset.Technology == types.IdentityProvider { + continue + } + if technicalAsset.Confidentiality >= types.Confidential || technicalAsset.Integrity >= types.Critical { + if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) && !fileServerAccessViaFTP(technicalAsset, incomingAccess) && + incomingAccess.Usage != types.DevOps && !isSharingSameParentTrustBoundary(input, technicalAsset, sourceAsset) { + highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential || + technicalAsset.Integrity == 
types.MissionCritical + risks = append(risks, r.createRisk(technicalAsset, incomingAccess, + input.TechnicalAssets[incomingAccess.SourceId], highRisk)) + } + } + } + } + } + return risks +} + +func isSharingSameParentTrustBoundary(input *types.ParsedModel, left, right types.TechnicalAsset) bool { + tbIDLeft, tbIDRight := left.GetTrustBoundaryId(input), right.GetTrustBoundaryId(input) + if len(tbIDLeft) == 0 && len(tbIDRight) > 0 { + return false + } + if len(tbIDLeft) > 0 && len(tbIDRight) == 0 { + return false + } + if len(tbIDLeft) == 0 && len(tbIDRight) == 0 { + return true + } + if tbIDLeft == tbIDRight { + return true + } + tbLeft, tbRight := input.TrustBoundaries[tbIDLeft], input.TrustBoundaries[tbIDRight] + tbParentsLeft, tbParentsRight := tbLeft.AllParentTrustBoundaryIDs(input), tbRight.AllParentTrustBoundaryIDs(input) + for _, parentLeft := range tbParentsLeft { + for _, parentRight := range tbParentsRight { + if parentLeft == parentRight { + return true + } + } + } + return false +} + +func fileServerAccessViaFTP(technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink) bool { + return technicalAsset.Technology == types.FileServer && + (incomingAccess.Protocol == types.FTP || incomingAccess.Protocol == types.FTPS || incomingAccess.Protocol == types.SFTP) +} + +func (r *UnguardedDirectDatastoreAccessRule) createRisk(dataStore types.TechnicalAsset, dataFlow types.CommunicationLink, clientOutsideTrustBoundary types.TechnicalAsset, moreRisky bool) types.Risk { + impact := types.LowImpact + if moreRisky || dataStore.RAA > 40 { + impact = types.MediumImpact + } + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Likely, impact), + ExploitationLikelihood: types.Likely, + ExploitationImpact: impact, + Title: "Unguarded Direct Datastore Access of " + dataStore.Title + " by " + + clientOutsideTrustBoundary.Title + " via " + dataFlow.Title + "", + MostRelevantTechnicalAssetId: dataStore.Id, + 
MostRelevantCommunicationLinkId: dataFlow.Id, + DataBreachProbability: types.Improbable, + DataBreachTechnicalAssetIDs: []string{dataStore.Id}, + } + risk.SyntheticId = risk.CategoryId + "@" + dataFlow.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataStore.Id + return risk +} diff --git a/risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go b/pkg/security/risks/builtin/unnecessary-communication-link-rule.go similarity index 54% rename from risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go rename to pkg/security/risks/builtin/unnecessary-communication-link-rule.go index 80843a32..66ed716b 100644 --- a/risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go +++ b/pkg/security/risks/builtin/unnecessary-communication-link-rule.go @@ -1,11 +1,17 @@ -package unnecessary_communication_link +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UnnecessaryCommunicationLinkRule struct{} + +func NewUnnecessaryCommunicationLinkRule() *UnnecessaryCommunicationLinkRule { + return &UnnecessaryCommunicationLinkRule{} +} + +func (*UnnecessaryCommunicationLinkRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unnecessary-communication-link", Title: "Unnecessary Communication Link", Description: "When a technical communication link does not send or receive any data assets, this is " + @@ -16,28 +22,28 @@ func Category() model.RiskCategory { Action: "Attack Surface Reduction", Mitigation: "Try to avoid using technical communication links that do not send or receive anything.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: 
"In-scope technical assets' technical communication links not sending or receiving any data assets.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*UnnecessaryCommunicationLinkRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *UnnecessaryCommunicationLinkRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] for _, commLink := range technicalAsset.CommunicationLinks { if len(commLink.DataAssetsSent) == 0 && len(commLink.DataAssetsReceived) == 0 { - if !technicalAsset.OutOfScope || !model.ParsedModelRoot.TechnicalAssets[commLink.TargetId].OutOfScope { - risks = append(risks, createRisk(technicalAsset, commLink)) + if !technicalAsset.OutOfScope || !input.TechnicalAssets[commLink.TargetId].OutOfScope { + risks = append(risks, r.createRisk(technicalAsset, commLink)) } } } @@ -45,19 +51,19 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink) model.Risk { +func (r *UnnecessaryCommunicationLinkRule) createRisk(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink) types.Risk { title := "Unnecessary Communication Link titled " + commLink.Title + " at technical asset " + technicalAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + 
CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: commLink.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + commLink.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + commLink.Id + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go b/pkg/security/risks/builtin/unnecessary-data-asset-rule.go similarity index 58% rename from risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go rename to pkg/security/risks/builtin/unnecessary-data-asset-rule.go index 2af7c618..dcb6cc3d 100644 --- a/risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go +++ b/pkg/security/risks/builtin/unnecessary-data-asset-rule.go @@ -1,15 +1,22 @@ -package unnecessary_data_asset +package builtin import ( - "github.com/threagile/threagile/model" "sort" + + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UnnecessaryDataAssetRule struct{} + +func NewUnnecessaryDataAssetRule() *UnnecessaryDataAssetRule { + return &UnnecessaryDataAssetRule{} +} + +func (*UnnecessaryDataAssetRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unnecessary-data-asset", Title: "Unnecessary Data Asset", - Description: "When a data asset is not processed or stored by any data assets and also not transferred by any " + + Description: "When a data asset is not processed by any data assets and also not transferred by any " + "communication links, this is an indicator for an unnecessary data asset (or for an incomplete model).", Impact: "If this 
risk is unmitigated, attackers might be able to access unnecessary data assets using " + "other vulnerabilities.", @@ -18,30 +25,30 @@ func Category() model.RiskCategory { Action: "Attack Surface Reduction", Mitigation: "Try to avoid having data assets that are not required/used.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "Modelled data assets not processed or stored by any data assets and also not transferred by any " + + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, + DetectionLogic: "Modelled data assets not processed by any data assets and also not transferred by any " + "communication links.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*UnnecessaryDataAssetRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) +func (r *UnnecessaryDataAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) // first create them in memory - otherwise in Go ranging over map is random order // range over them in sorted (hence re-producible) way: unusedDataAssetIDs := make(map[string]bool) - for k := range model.ParsedModelRoot.DataAssets { + for k := range input.DataAssets { unusedDataAssetIDs[k] = true } - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { + for _, technicalAsset := range input.TechnicalAssets { for _, processedDataAssetID := range technicalAsset.DataAssetsProcessed { delete(unusedDataAssetIDs, processedDataAssetID) } @@ -63,24 +70,24 @@ func GenerateRisks() []model.Risk { } sort.Strings(keys) for _, unusedDataAssetID := range keys { - risks = 
append(risks, createRisk(unusedDataAssetID)) + risks = append(risks, r.createRisk(input, unusedDataAssetID)) } return risks } -func createRisk(unusedDataAssetID string) model.Risk { - unusedDataAsset := model.ParsedModelRoot.DataAssets[unusedDataAssetID] +func (r *UnnecessaryDataAssetRule) createRisk(input *types.ParsedModel, unusedDataAssetID string) types.Risk { + unusedDataAsset := input.DataAssets[unusedDataAssetID] title := "Unnecessary Data Asset named " + unusedDataAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantDataAssetId: unusedDataAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{unusedDataAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + unusedDataAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + unusedDataAsset.Id return risk } diff --git a/risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go b/pkg/security/risks/builtin/unnecessary-data-transfer-rule.go similarity index 56% rename from risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go rename to pkg/security/risks/builtin/unnecessary-data-transfer-rule.go index f33c58a0..51a36a48 100644 --- a/risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go +++ b/pkg/security/risks/builtin/unnecessary-data-transfer-rule.go @@ -1,12 +1,19 @@ -package unnecessary_data_transfer +package builtin import ( - "github.com/threagile/threagile/model" "sort" + + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return 
model.RiskCategory{ +type UnnecessaryDataTransferRule struct{} + +func NewUnnecessaryDataTransferRule() *UnnecessaryDataTransferRule { + return &UnnecessaryDataTransferRule{} +} + +func (*UnnecessaryDataTransferRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unnecessary-data-transfer", Title: "Unnecessary Data Transfer", Description: "When a technical asset sends or receives data assets, which it neither processes or stores this is " + @@ -17,69 +24,69 @@ func Category() model.RiskCategory { CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", Action: "Attack Surface Reduction", Mitigation: "Try to avoid sending or receiving sensitive data assets which are not required (i.e. neither " + - "processed or stored) by the involved technical asset.", + "processed) by the involved technical asset.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: "In-scope technical assets sending or receiving sensitive data assets which are neither processed nor " + "stored by the technical asset are flagged with this risk. The risk rating (low or medium) depends on the " + "confidentiality, integrity, and availability rating of the technical asset. Monitoring data is exempted from this risk.", RiskAssessment: "The risk assessment is depending on the confidentiality and integrity rating of the transferred data asset " + - "either " + model.LowSeverity.String() + " or " + model.MediumSeverity.String() + ".", + "either " + types.LowSeverity.String() + " or " + types.MediumSeverity.String() + ".", FalsePositives: "Technical assets missing the model entries of either processing or storing the mentioned data assets " + "can be considered as false positives (incomplete models) after individual review. 
These should then be addressed by " + - "completing the model so that all necessary data assets are processed and/or stored by the technical asset involved.", + "completing the model so that all necessary data assets are processed by the technical asset involved.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*UnnecessaryDataTransferRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *UnnecessaryDataTransferRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if technicalAsset.OutOfScope { continue } // outgoing data flows for _, outgoingDataFlow := range technicalAsset.CommunicationLinks { - targetAsset := model.ParsedModelRoot.TechnicalAssets[outgoingDataFlow.TargetId] + targetAsset := input.TechnicalAssets[outgoingDataFlow.TargetId] if targetAsset.Technology.IsUnnecessaryDataTolerated() { continue } - risks = checkRisksAgainstTechnicalAsset(risks, technicalAsset, outgoingDataFlow, false) + risks = r.checkRisksAgainstTechnicalAsset(input, risks, technicalAsset, outgoingDataFlow, false) } // incoming data flows - commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - sort.Sort(model.ByTechnicalCommunicationLinkIdSort(commLinks)) + commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] + sort.Sort(types.ByTechnicalCommunicationLinkIdSort(commLinks)) for _, incomingDataFlow := range commLinks { - targetAsset := model.ParsedModelRoot.TechnicalAssets[incomingDataFlow.SourceId] + targetAsset := input.TechnicalAssets[incomingDataFlow.SourceId] if targetAsset.Technology.IsUnnecessaryDataTolerated() { continue } - risks = 
checkRisksAgainstTechnicalAsset(risks, technicalAsset, incomingDataFlow, true) + risks = r.checkRisksAgainstTechnicalAsset(input, risks, technicalAsset, incomingDataFlow, true) } } return risks } -func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.TechnicalAsset, - dataFlow model.CommunicationLink, inverseDirection bool) []model.Risk { +func (r *UnnecessaryDataTransferRule) checkRisksAgainstTechnicalAsset(input *types.ParsedModel, risks []types.Risk, technicalAsset types.TechnicalAsset, + dataFlow types.CommunicationLink, inverseDirection bool) []types.Risk { for _, transferredDataAssetId := range dataFlow.DataAssetsSent { if !technicalAsset.ProcessesOrStoresDataAsset(transferredDataAssetId) { - transferredDataAsset := model.ParsedModelRoot.DataAssets[transferredDataAssetId] + transferredDataAsset := input.DataAssets[transferredDataAssetId] //fmt.Print("--->>> Checking "+technicalAsset.Id+": "+transferredDataAsset.Id+" sent via "+dataFlow.Id+"\n") - if transferredDataAsset.Confidentiality >= model.Confidential || transferredDataAsset.Integrity >= model.Critical { + if transferredDataAsset.Confidentiality >= types.Confidential || transferredDataAsset.Integrity >= types.Critical { commPartnerId := dataFlow.TargetId if inverseDirection { commPartnerId = dataFlow.SourceId } - commPartnerAsset := model.ParsedModelRoot.TechnicalAssets[commPartnerId] - risk := createRisk(technicalAsset, transferredDataAsset, commPartnerAsset) + commPartnerAsset := input.TechnicalAssets[commPartnerId] + risk := r.createRisk(technicalAsset, transferredDataAsset, commPartnerAsset) if isNewRisk(risks, risk) { risks = append(risks, risk) } @@ -88,15 +95,15 @@ func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.Te } for _, transferredDataAssetId := range dataFlow.DataAssetsReceived { if !technicalAsset.ProcessesOrStoresDataAsset(transferredDataAssetId) { - transferredDataAsset := model.ParsedModelRoot.DataAssets[transferredDataAssetId] + 
transferredDataAsset := input.DataAssets[transferredDataAssetId] //fmt.Print("--->>> Checking "+technicalAsset.Id+": "+transferredDataAsset.Id+" received via "+dataFlow.Id+"\n") - if transferredDataAsset.Confidentiality >= model.Confidential || transferredDataAsset.Integrity >= model.Critical { + if transferredDataAsset.Confidentiality >= types.Confidential || transferredDataAsset.Integrity >= types.Critical { commPartnerId := dataFlow.TargetId if inverseDirection { commPartnerId = dataFlow.SourceId } - commPartnerAsset := model.ParsedModelRoot.TechnicalAssets[commPartnerId] - risk := createRisk(technicalAsset, transferredDataAsset, commPartnerAsset) + commPartnerAsset := input.TechnicalAssets[commPartnerId] + risk := r.createRisk(technicalAsset, transferredDataAsset, commPartnerAsset) if isNewRisk(risks, risk) { risks = append(risks, risk) } @@ -106,7 +113,7 @@ func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.Te return risks } -func isNewRisk(risks []model.Risk, risk model.Risk) bool { +func isNewRisk(risks []types.Risk, risk types.Risk) bool { for _, check := range risks { if check.SyntheticId == risk.SyntheticId { return false @@ -115,27 +122,27 @@ func isNewRisk(risks []model.Risk, risk model.Risk) bool { return true } -func createRisk(technicalAsset model.TechnicalAsset, dataAssetTransferred model.DataAsset, commPartnerAsset model.TechnicalAsset) model.Risk { - moreRisky := dataAssetTransferred.Confidentiality == model.StrictlyConfidential || dataAssetTransferred.Integrity == model.MissionCritical +func (r *UnnecessaryDataTransferRule) createRisk(technicalAsset types.TechnicalAsset, dataAssetTransferred types.DataAsset, commPartnerAsset types.TechnicalAsset) types.Risk { + moreRisky := dataAssetTransferred.Confidentiality == types.StrictlyConfidential || dataAssetTransferred.Integrity == types.MissionCritical - impact := model.LowImpact + impact := types.LowImpact if moreRisky { - impact = model.MediumImpact + impact = 
types.MediumImpact } title := "Unnecessary Data Transfer of " + dataAssetTransferred.Title + " data at " + technicalAsset.Title + " " + "from/to " + commPartnerAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, impact), + ExploitationLikelihood: types.Unlikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantDataAssetId: dataAssetTransferred.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + dataAssetTransferred.Id + "@" + technicalAsset.Id + "@" + commPartnerAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + dataAssetTransferred.Id + "@" + technicalAsset.Id + "@" + commPartnerAsset.Id return risk } diff --git a/risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go b/pkg/security/risks/builtin/unnecessary-technical-asset-rule.go similarity index 53% rename from risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go rename to pkg/security/risks/builtin/unnecessary-technical-asset-rule.go index 012117e8..c075407e 100644 --- a/risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go +++ b/pkg/security/risks/builtin/unnecessary-technical-asset-rule.go @@ -1,14 +1,20 @@ -package unnecessary_technical_asset +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UnnecessaryTechnicalAssetRule struct{} + +func NewUnnecessaryTechnicalAssetRule() *UnnecessaryTechnicalAssetRule { + return &UnnecessaryTechnicalAssetRule{} +} + +func 
(*UnnecessaryTechnicalAssetRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "unnecessary-technical-asset", Title: "Unnecessary Technical Asset", - Description: "When a technical asset does not process or store any data assets, this is " + + Description: "When a technical asset does not process any data assets, this is " + "an indicator for an unnecessary technical asset (or for an incomplete model). " + "This is also the case if the asset has no communication links (either outgoing or incoming).", Impact: "If this risk is unmitigated, attackers might be able to target unnecessary technical assets.", @@ -17,44 +23,44 @@ func Category() model.RiskCategory { Action: "Attack Surface Reduction", Mitigation: "Try to avoid using technical assets that do not process or store anything.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: "Technical assets not processing or storing any data assets.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*UnnecessaryTechnicalAssetRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *UnnecessaryTechnicalAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if len(technicalAsset.DataAssetsProcessed) == 0 && len(technicalAsset.DataAssetsStored) == 0 || - 
(len(technicalAsset.CommunicationLinks) == 0 && len(model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]) == 0) { - risks = append(risks, createRisk(technicalAsset)) + (len(technicalAsset.CommunicationLinks) == 0 && len(input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]) == 0) { + risks = append(risks, r.createRisk(technicalAsset)) } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *UnnecessaryTechnicalAssetRule) createRisk(technicalAsset types.TechnicalAsset) types.Risk { title := "Unnecessary Technical Asset named " + technicalAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go b/pkg/security/risks/builtin/untrusted-deserialization-rule.go similarity index 59% rename from risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go rename to pkg/security/risks/builtin/untrusted-deserialization-rule.go index 5da7f2db..d6e7aeb7 100644 --- a/risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go +++ b/pkg/security/risks/builtin/untrusted-deserialization-rule.go @@ -1,11 +1,17 @@ -package untrusted_deserialization +package builtin import ( - 
"github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type UntrustedDeserializationRule struct{} + +func NewUntrustedDeserializationRule() *UntrustedDeserializationRule { + return &UntrustedDeserializationRule{} +} + +func (*UntrustedDeserializationRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "untrusted-deserialization", Title: "Untrusted Deserialization", Description: "When a technical asset accepts data in a specific serialized form (like Java or .NET serialization), " + @@ -21,10 +27,10 @@ func Category() model.RiskCategory { "Alternatively a strict whitelisting approach of the classes/types/values to deserialize might help as well. " + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Tampering, + Function: types.Architecture, + STRIDE: types.Tampering, DetectionLogic: "In-scope technical assets accepting serialization data formats (including EJB and RMI protocols).", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.", FalsePositives: "Fully trusted (i.e. 
cryptographically signed or similar) data deserialized can be considered " + "as false positives after individual review.", ModelFailurePossibleReason: false, @@ -32,68 +38,68 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*UntrustedDeserializationRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *UntrustedDeserializationRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if technicalAsset.OutOfScope { continue } hasOne, acrossTrustBoundary := false, false commLinkTitle := "" for _, format := range technicalAsset.DataFormatsAccepted { - if format == model.Serialization { + if format == types.Serialization { hasOne = true } } - if technicalAsset.Technology == model.EJB { + if technicalAsset.Technology == types.EJB { hasOne = true } // check for any incoming IIOP and JRMP protocols - for _, commLink := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { - if commLink.Protocol == model.IIOP || commLink.Protocol == model.IIOP_encrypted || - commLink.Protocol == model.JRMP || commLink.Protocol == model.JRMP_encrypted { + for _, commLink := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { + if commLink.Protocol == types.IIOP || commLink.Protocol == types.IiopEncrypted || + commLink.Protocol == types.JRMP || commLink.Protocol == types.JrmpEncrypted { hasOne = true - if commLink.IsAcrossTrustBoundaryNetworkOnly() { + if commLink.IsAcrossTrustBoundaryNetworkOnly(input) { acrossTrustBoundary = true commLinkTitle = commLink.Title } } } if hasOne { - risks = append(risks, createRisk(technicalAsset, acrossTrustBoundary, commLinkTitle)) + 
risks = append(risks, r.createRisk(input, technicalAsset, acrossTrustBoundary, commLinkTitle)) } } return risks } -func createRisk(technicalAsset model.TechnicalAsset, acrossTrustBoundary bool, commLinkTitle string) model.Risk { +func (r *UntrustedDeserializationRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, acrossTrustBoundary bool, commLinkTitle string) types.Risk { title := "Untrusted Deserialization risk at " + technicalAsset.Title + "" - impact := model.HighImpact - likelihood := model.Likely + impact := types.HighImpact + likelihood := types.Likely if acrossTrustBoundary { - likelihood = model.VeryLikely + likelihood = types.VeryLikely title += " across a trust boundary (at least via communication link " + commLinkTitle + ")" } - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.VeryHighImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.VeryHighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(likelihood, impact), ExploitationLikelihood: likelihood, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git 
a/risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go b/pkg/security/risks/builtin/wrong-communication-link-content-rule.go similarity index 50% rename from risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go rename to pkg/security/risks/builtin/wrong-communication-link-content-rule.go index 3464e490..7f4997c7 100644 --- a/risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go +++ b/pkg/security/risks/builtin/wrong-communication-link-content-rule.go @@ -1,11 +1,17 @@ -package wrong_communication_link_content +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type WrongCommunicationLinkContentRule struct{} + +func NewWrongCommunicationLinkContentRule() *WrongCommunicationLinkContentRule { + return &WrongCommunicationLinkContentRule{} +} + +func (*WrongCommunicationLinkContentRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "wrong-communication-link-content", Title: "Wrong Communication Link Content", Description: "When a communication link is defined as readonly, but does not receive any data asset, " + @@ -17,69 +23,69 @@ func Category() model.RiskCategory { Mitigation: "Try to model the correct readonly flag and/or data sent/received of communication links. 
" + "Also try to use communication link types matching the target technology/machine types.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.InformationDisclosure, + Function: types.Architecture, + STRIDE: types.InformationDisclosure, DetectionLogic: "Communication links with inconsistent data assets being sent/received not matching their readonly flag or otherwise inconsistent protocols not matching the target technology type.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*WrongCommunicationLinkContentRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, techAsset := range model.ParsedModelRoot.TechnicalAssets { +func (r *WrongCommunicationLinkContentRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, techAsset := range input.TechnicalAssets { for _, commLink := range techAsset.CommunicationLinks { // check readonly consistency if commLink.Readonly { if len(commLink.DataAssetsReceived) == 0 { - risks = append(risks, createRisk(techAsset, commLink, + risks = append(risks, r.createRisk(techAsset, commLink, "(data assets sent/received not matching the communication link's readonly flag)")) } } else { if len(commLink.DataAssetsSent) == 0 { - risks = append(risks, createRisk(techAsset, commLink, + risks = append(risks, r.createRisk(techAsset, commLink, "(data assets sent/received not matching the communication link's readonly flag)")) } } // check for protocol inconsistencies - targetAsset := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId] - if commLink.Protocol == model.InProcessLibraryCall && targetAsset.Technology != 
model.Library { - risks = append(risks, createRisk(techAsset, commLink, - "(protocol type \""+model.InProcessLibraryCall.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+model.Library.String()+"\")")) + targetAsset := input.TechnicalAssets[commLink.TargetId] + if commLink.Protocol == types.InProcessLibraryCall && targetAsset.Technology != types.Library { + risks = append(risks, r.createRisk(techAsset, commLink, + "(protocol type \""+types.InProcessLibraryCall.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+types.Library.String()+"\")")) } - if commLink.Protocol == model.LocalFileAccess && targetAsset.Technology != model.LocalFileSystem { - risks = append(risks, createRisk(techAsset, commLink, - "(protocol type \""+model.LocalFileAccess.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+model.LocalFileSystem.String()+"\")")) + if commLink.Protocol == types.LocalFileAccess && targetAsset.Technology != types.LocalFileSystem { + risks = append(risks, r.createRisk(techAsset, commLink, + "(protocol type \""+types.LocalFileAccess.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+types.LocalFileSystem.String()+"\")")) } - if commLink.Protocol == model.ContainerSpawning && targetAsset.Machine != model.Container { - risks = append(risks, createRisk(techAsset, commLink, - "(protocol type \""+model.ContainerSpawning.String()+"\" does not match target machine type \""+targetAsset.Machine.String()+"\": expected \""+model.Container.String()+"\")")) + if commLink.Protocol == types.ContainerSpawning && targetAsset.Machine != types.Container { + risks = append(risks, r.createRisk(techAsset, commLink, + "(protocol type \""+types.ContainerSpawning.String()+"\" does not match target machine type \""+targetAsset.Machine.String()+"\": expected 
\""+types.Container.String()+"\")")) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink, reason string) model.Risk { +func (r *WrongCommunicationLinkContentRule) createRisk(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink, reason string) types.Risk { title := "Wrong Communication Link Content " + reason + " at " + technicalAsset.Title + " " + "regarding communication link " + commLink.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, MostRelevantCommunicationLinkId: commLink.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + commLink.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + commLink.Id return risk } diff --git a/risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go b/pkg/security/risks/builtin/wrong-trust-boundary-content.go similarity index 50% rename from risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go rename to pkg/security/risks/builtin/wrong-trust-boundary-content.go index 95801845..81959f8a 100644 --- a/risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go +++ b/pkg/security/risks/builtin/wrong-trust-boundary-content.go @@ -1,14 +1,20 @@ -package wrong_trust_boundary_content +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() 
model.RiskCategory { - return model.RiskCategory{ +type WrongTrustBoundaryContentRule struct{} + +func NewWrongTrustBoundaryContentRule() *WrongTrustBoundaryContentRule { + return &WrongTrustBoundaryContentRule{} +} + +func (*WrongTrustBoundaryContentRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "wrong-trust-boundary-content", Title: "Wrong Trust Boundary Content", - Description: "When a trust boundary of type " + model.NetworkPolicyNamespaceIsolation.String() + " contains " + + Description: "When a trust boundary of type " + types.NetworkPolicyNamespaceIsolation.String() + " contains " + "non-container assets it is likely to be a model failure.", Impact: "If this potential model error is not fixed, some risks might not be visible.", ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", @@ -16,28 +22,28 @@ func Category() model.RiskCategory { Action: "Model Consistency", Mitigation: "Try to model the correct types of trust boundaries and data assets.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, + Function: types.Architecture, + STRIDE: types.ElevationOfPrivilege, DetectionLogic: "Trust boundaries which should only contain containers, but have different assets inside.", - RiskAssessment: model.LowSeverity.String(), + RiskAssessment: types.LowSeverity.String(), FalsePositives: "Usually no false positives as this looks like an incomplete model.", ModelFailurePossibleReason: true, CWE: 1008, } } -func SupportedTags() []string { +func (*WrongTrustBoundaryContentRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries { - if trustBoundary.Type == model.NetworkPolicyNamespaceIsolation { +func (r *WrongTrustBoundaryContentRule) GenerateRisks(input *types.ParsedModel) []types.Risk { 
+ risks := make([]types.Risk, 0) + for _, trustBoundary := range input.TrustBoundaries { + if trustBoundary.Type == types.NetworkPolicyNamespaceIsolation { for _, techAssetID := range trustBoundary.TechnicalAssetsInside { - techAsset := model.ParsedModelRoot.TechnicalAssets[techAssetID] - if techAsset.Machine != model.Container && techAsset.Machine != model.Serverless { - risks = append(risks, createRisk(techAsset)) + techAsset := input.TechnicalAssets[techAssetID] + if techAsset.Machine != types.Container && techAsset.Machine != types.Serverless { + risks = append(risks, r.createRisk(techAsset)) } } } @@ -45,18 +51,18 @@ func GenerateRisks() []model.Risk { return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *WrongTrustBoundaryContentRule) createRisk(technicalAsset types.TechnicalAsset) types.Risk { title := "Wrong Trust Boundary Content (non-container asset inside container trust boundary) at " + technicalAsset.Title + "" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: model.LowImpact, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact), + ExploitationLikelihood: types.Unlikely, + ExploitationImpact: types.LowImpact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, + DataBreachProbability: types.Improbable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/risks/built-in/xml-external-entity/xml-external-entity-rule.go b/pkg/security/risks/builtin/xml-external-entity-rule.go similarity index 59% rename from risks/built-in/xml-external-entity/xml-external-entity-rule.go rename to 
pkg/security/risks/builtin/xml-external-entity-rule.go index e6e4778a..e548ea19 100644 --- a/risks/built-in/xml-external-entity/xml-external-entity-rule.go +++ b/pkg/security/risks/builtin/xml-external-entity-rule.go @@ -1,11 +1,17 @@ -package xml_external_entity +package builtin import ( - "github.com/threagile/threagile/model" + "github.com/threagile/threagile/pkg/security/types" ) -func Category() model.RiskCategory { - return model.RiskCategory{ +type XmlExternalEntityRule struct{} + +func NewXmlExternalEntityRule() *XmlExternalEntityRule { + return &XmlExternalEntityRule{} +} + +func (*XmlExternalEntityRule) Category() types.RiskCategory { + return types.RiskCategory{ Id: "xml-external-entity", Title: "XML External Entity (XXE)", Description: "When a technical asset accepts data in XML format, XML External Entity (XXE) risks might arise.", @@ -18,10 +24,10 @@ func Category() model.RiskCategory { Mitigation: "Apply hardening of all XML parser instances in order to stay safe from XML External Entity (XXE) vulnerabilities. " + "When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Development, - STRIDE: model.InformationDisclosure, + Function: types.Development, + STRIDE: types.InformationDisclosure, DetectionLogic: "In-scope technical assets accepting XML data formats.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored. " + + RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed. 
" + "Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF (and XXE vulnerabilities are often also SSRF vulnerabilities).", FalsePositives: "Fully trusted (i.e. cryptographically signed or similar) XML data can be considered " + "as false positives after individual review.", @@ -30,44 +36,44 @@ func Category() model.RiskCategory { } } -func SupportedTags() []string { +func (*XmlExternalEntityRule) SupportedTags() []string { return []string{} } -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] +func (r *XmlExternalEntityRule) GenerateRisks(input *types.ParsedModel) []types.Risk { + risks := make([]types.Risk, 0) + for _, id := range input.SortedTechnicalAssetIDs() { + technicalAsset := input.TechnicalAssets[id] if technicalAsset.OutOfScope { continue } for _, format := range technicalAsset.DataFormatsAccepted { - if format == model.XML { - risks = append(risks, createRisk(technicalAsset)) + if format == types.XML { + risks = append(risks, r.createRisk(input, technicalAsset)) } } } return risks } -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { +func (r *XmlExternalEntityRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk { title := "XML External Entity (XXE) risk at " + technicalAsset.Title + "" - impact := model.MediumImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical || - technicalAsset.HighestAvailability() == model.MissionCritical { - impact = model.HighImpact + impact := types.MediumImpact + if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || + technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical || + 
technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical { + impact = types.HighImpact } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.VeryLikely, impact), - ExploitationLikelihood: model.VeryLikely, + risk := types.Risk{ + CategoryId: r.Category().Id, + Severity: types.CalculateSeverity(types.VeryLikely, impact), + ExploitationLikelihood: types.VeryLikely, ExploitationImpact: impact, Title: title, MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Probable, + DataBreachProbability: types.Probable, DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: use the same logic here as for SSRF rule, as XXE is also SSRF ;) } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id return risk } diff --git a/pkg/security/risks/risks.go b/pkg/security/risks/risks.go new file mode 100644 index 00000000..33fae818 --- /dev/null +++ b/pkg/security/risks/risks.go @@ -0,0 +1,59 @@ +package risks + +import ( + "github.com/threagile/threagile/pkg/security/risks/builtin" + "github.com/threagile/threagile/pkg/security/types" +) + +type RiskRule interface { + Category() types.RiskCategory + SupportedTags() []string + GenerateRisks(*types.ParsedModel) []types.Risk +} + +func GetBuiltInRiskRules() []RiskRule { + return []RiskRule{ + builtin.NewAccidentalSecretLeakRule(), + builtin.NewCodeBackdooringRule(), + builtin.NewContainerBaseImageBackdooringRule(), + builtin.NewContainerPlatformEscapeRule(), + builtin.NewCrossSiteRequestForgeryRule(), + builtin.NewCrossSiteScriptingRule(), + builtin.NewDosRiskyAccessAcrossTrustBoundaryRule(), + builtin.NewIncompleteModelRule(), + builtin.NewLdapInjectionRule(), + builtin.NewMissingAuthenticationRule(), + builtin.NewMissingAuthenticationSecondFactorRule(builtin.NewMissingAuthenticationRule()), + builtin.NewMissingBuildInfrastructureRule(), + builtin.NewMissingCloudHardeningRule(), 
+ builtin.NewMissingFileValidationRule(), + builtin.NewMissingHardeningRule(), + builtin.NewMissingIdentityPropagationRule(), + builtin.NewMissingIdentityProviderIsolationRule(), + builtin.NewMissingIdentityStoreRule(), + builtin.NewMissingNetworkSegmentationRule(), + builtin.NewMissingVaultRule(), + builtin.NewMissingVaultIsolationRule(), + builtin.NewMissingWafRule(), + builtin.NewMixedTargetsOnSharedRuntimeRule(), + builtin.NewPathTraversalRule(), + builtin.NewPushInsteadPullDeploymentRule(), + builtin.NewSearchQueryInjectionRule(), + builtin.NewServerSideRequestForgeryRule(), + builtin.NewServiceRegistryPoisoningRule(), + builtin.NewSqlNoSqlInjectionRule(), + builtin.NewUncheckedDeploymentRule(), + builtin.NewUnencryptedAssetRule(), + builtin.NewUnencryptedCommunicationRule(), + builtin.NewUnguardedAccessFromInternetRule(), + builtin.NewUnguardedDirectDatastoreAccessRule(), + builtin.NewUnnecessaryCommunicationLinkRule(), + builtin.NewUnnecessaryDataAssetRule(), + builtin.NewUnnecessaryDataTransferRule(), + builtin.NewUnnecessaryTechnicalAssetRule(), + builtin.NewUntrustedDeserializationRule(), + builtin.NewWrongCommunicationLinkContentRule(), + builtin.NewWrongTrustBoundaryContentRule(), + builtin.NewXmlExternalEntityRule(), + } +} diff --git a/pkg/security/types/authentication.go b/pkg/security/types/authentication.go new file mode 100644 index 00000000..95db6aeb --- /dev/null +++ b/pkg/security/types/authentication.go @@ -0,0 +1,111 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Authentication int + +const ( + NoneAuthentication Authentication = iota + Credentials + SessionId + Token + ClientCertificate + TwoFactor + Externalized +) + +func AuthenticationValues() []TypeEnum { + return []TypeEnum{ + NoneAuthentication, + Credentials, + SessionId, + Token, + ClientCertificate, + TwoFactor, + Externalized, + } +} + +var AuthenticationTypeDescription = 
[...]TypeDescription{ + {"none", "No authentication"}, + {"credentials", "Username and password, pin or passphrase"}, + {"session-id", "A server generated session id with limited life span"}, + {"token", "A server generated token. Containing session id, other data and is cryptographically signed"}, + {"client-certificate", "A certificate file stored on the client identifying this specific client"}, + {"two-factor", "Credentials plus another factor like a physical object (card) or biometrics"}, + {"externalized", "Some external company handles authentication"}, +} + +func ParseAuthentication(value string) (authentication Authentication, err error) { + value = strings.TrimSpace(value) + for _, candidate := range AuthenticationValues() { + if candidate.String() == value { + return candidate.(Authentication), err + } + } + return authentication, errors.New("Unable to parse into type: " + value) +} + +func (what Authentication) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + //return [...]string{"none", "credentials", "session-id", "token", "client-certificate", "two-factor", "externalized"}[what] + return AuthenticationTypeDescription[what].Name +} + +func (what Authentication) Explain() string { + return AuthenticationTypeDescription[what].Description +} + +func (what Authentication) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Authentication) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Authentication) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Authentication) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = 
// find performs a case-insensitive lookup of the given name in
// AuthenticationTypeDescription; the table index is the enum value.
func (what Authentication) find(value string) (Authentication, error) {
	for index, description := range AuthenticationTypeDescription {
		if strings.EqualFold(value, description.Name) {
			return Authentication(index), nil
		}
	}

	return Authentication(0), fmt.Errorf("unknown authentication value %q", value)
}

// ---- file: pkg/security/types/authentication_test.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseAuthenticationTest pairs one raw input with the expected parse result
// (value or error) for the table-driven test below.
type ParseAuthenticationTest struct {
	input         string
	expected      Authentication
	expectedError error
}

// TestParseAuthentication verifies that every canonical authentication name
// parses to its enum value and that an unknown name yields a parse error.
func TestParseAuthentication(t *testing.T) {
	testCases := map[string]ParseAuthenticationTest{
		"none": {
			input:    "none",
			expected: NoneAuthentication,
		},
		"credentials": {
			input:    "credentials",
			expected: Credentials,
		},
		"session-id": {
			input:    "session-id",
			expected: SessionId,
		},
		"token": {
			input:    "token",
			expected: Token,
		},
		"client-certificate": {
			input:    "client-certificate",
			expected: ClientCertificate,
		},
		"two-factor": {
			input:    "two-factor",
			expected: TwoFactor,
		},
		"externalized": {
			input:    "externalized",
			expected: Externalized,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseAuthentication(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}

// ---- file: pkg/security/types/authorization.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// Authorization describes how a communication link authorizes its callers.
type Authorization int

const (
	NoneAuthorization Authorization = iota
	TechnicalUser
	EndUserIdentityPropagation
)

// AuthorizationValues lists all enum values in declaration order.
func AuthorizationValues() []TypeEnum {
	return []TypeEnum{
		NoneAuthorization,
		TechnicalUser,
		EndUserIdentityPropagation,
	}
}

// AuthorizationTypeDescription maps each enum value (by index) to its
// canonical name and a short explanation.
var AuthorizationTypeDescription = [...]TypeDescription{
	{"none", "No authorization"},
	{"technical-user", "Technical user (service-to-service) like DB user credentials"},
	{"enduser-identity-propagation", "Identity of end user propagates to this service"},
}

// ParseAuthorization converts a whitespace-trimmed name (exact case) into its
// enum value; unknown names produce an error. The zero value is returned on
// failure.
func ParseAuthorization(value string) (authorization Authorization, err error) {
	value = strings.TrimSpace(value)
	for _, candidate := range AuthorizationValues() {
		if candidate.String() == value {
			return candidate.(Authorization), err
		}
	}
	return authorization, errors.New("Unable to parse into type: " + value)
}

// String returns the canonical (kebab-case) name of the value.
func (what Authorization) String() string {
	// NOTE: maintain list also in schema.json for validation in IDEs
	return AuthorizationTypeDescription[what].Name
}

// Explain returns the human-readable description of the value.
func (what Authorization) Explain() string {
	return AuthorizationTypeDescription[what].Description
}

// MarshalJSON serializes the enum as its canonical name string.
func (what Authorization) MarshalJSON() ([]byte, error) {
	return json.Marshal(what.String())
}

// UnmarshalJSON parses a JSON string (case-insensitively, via find) back
// into the enum value.
func (what *Authorization) UnmarshalJSON(data []byte) error {
	var text string
	unmarshalError := json.Unmarshal(data, &text)
	if unmarshalError != nil {
		return unmarshalError
	}

	value, findError := what.find(text)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}

// MarshalYAML serializes the enum as its canonical name string.
func (what Authorization) MarshalYAML() (interface{}, error) {
	return what.String(), nil
}

// UnmarshalYAML parses the node's scalar value (case-insensitively, via find).
func (what *Authorization) UnmarshalYAML(node *yaml.Node) error {
	value, findError := what.find(node.Value)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}

// find performs a case-insensitive lookup in the description table; the
// table index is the enum value.
func (what Authorization) find(value string) (Authorization, error) {
	for index, description := range AuthorizationTypeDescription {
		if strings.EqualFold(value, description.Name) {
			return Authorization(index), nil
		}
	}

	return Authorization(0), fmt.Errorf("unknown authorization value %q", value)
}

// ---- file: pkg/security/types/authorization_test.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseAuthorizationTest pairs one raw input with the expected parse result
// (value or error) for the table-driven test below.
type ParseAuthorizationTest struct {
	input         string
	expected      Authorization
	expectedError error
}

// TestParseAuthorization verifies that every canonical authorization name
// parses to its enum value and that an unknown name yields a parse error.
func TestParseAuthorization(t *testing.T) {
	testCases := map[string]ParseAuthorizationTest{
		"none": {
			input:    "none",
			expected: NoneAuthorization,
		},
		"technical-user": {
			input:    "technical-user",
			expected: TechnicalUser,
		},
		"enduser-identity-propagation": {
			input:    "enduser-identity-propagation",
			expected: EndUserIdentityPropagation,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseAuthorization(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}

// ---- file: pkg/security/types/communication_link.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"sort"
)

// CommunicationLink models one directed connection between two technical
// assets (SourceId -> TargetId) and the data assets flowing over it.
type CommunicationLink struct {
	Id                     string         `json:"id,omitempty" yaml:"id,omitempty"`
	SourceId               string         `json:"source_id,omitempty" yaml:"source_id,omitempty"`
	TargetId               string         `json:"target_id,omitempty" yaml:"target_id,omitempty"`
	Title                  string         `json:"title,omitempty" yaml:"title,omitempty"`
	Description            string         `json:"description,omitempty" yaml:"description,omitempty"`
	Protocol               Protocol       `json:"protocol,omitempty" yaml:"protocol,omitempty"`
	Tags                   []string       `json:"tags,omitempty" yaml:"tags,omitempty"`
	VPN                    bool           `json:"vpn,omitempty" yaml:"vpn,omitempty"`
	IpFiltered             bool           `json:"ip_filtered,omitempty" yaml:"ip_filtered,omitempty"`
	Readonly               bool           `json:"readonly,omitempty" yaml:"readonly,omitempty"`
	Authentication         Authentication `json:"authentication,omitempty" yaml:"authentication,omitempty"`
	Authorization          Authorization  `json:"authorization,omitempty" yaml:"authorization,omitempty"`
	Usage                  Usage          `json:"usage,omitempty" yaml:"usage,omitempty"`
	DataAssetsSent         []string       `json:"data_assets_sent,omitempty" yaml:"data_assets_sent,omitempty"`
	DataAssetsReceived     []string       `json:"data_assets_received,omitempty" yaml:"data_assets_received,omitempty"`
	DiagramTweakWeight     int            `json:"diagram_tweak_weight,omitempty" yaml:"diagram_tweak_weight,omitempty"`
	DiagramTweakConstraint bool           `json:"diagram_tweak_constraint,omitempty" yaml:"diagram_tweak_constraint,omitempty"`
}

// IsTaggedWithAny reports whether any of the given tags is present
// (case-insensitively) on this link.
func (what CommunicationLink) IsTaggedWithAny(tags ...string) bool {
	return containsCaseInsensitiveAny(what.Tags, tags...)
}

// IsTaggedWithBaseTag reports whether the link carries the base tag or any
// tag derived from it (semantics defined by the shared IsTaggedWithBaseTag
// helper).
func (what CommunicationLink) IsTaggedWithBaseTag(baseTag string) bool {
	return IsTaggedWithBaseTag(what.Tags, baseTag)
}

// IsAcrossTrustBoundary reports whether source and target live in different
// directly-containing trust boundaries.
// NOTE(review): assets missing from the map yield zero-value boundaries with
// equal (empty) Ids, i.e. "not across" — confirm that is the intended default.
func (what CommunicationLink) IsAcrossTrustBoundary(parsedModel *ParsedModel) bool {
	trustBoundaryOfSourceAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId]
	trustBoundaryOfTargetAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId]
	return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id
}

// IsAcrossTrustBoundaryNetworkOnly is like IsAcrossTrustBoundary but only
// counts *network* boundaries: for a non-network boundary the parent boundary
// is used instead, and the target's boundary must itself be a network one.
func (what CommunicationLink) IsAcrossTrustBoundaryNetworkOnly(parsedModel *ParsedModel) bool {
	trustBoundaryOfSourceAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId]
	if !trustBoundaryOfSourceAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
		trustBoundaryOfSourceAsset = parsedModel.TrustBoundaries[trustBoundaryOfSourceAsset.ParentTrustBoundaryID(parsedModel)]
	}
	trustBoundaryOfTargetAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId]
	if !trustBoundaryOfTargetAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
		trustBoundaryOfTargetAsset = parsedModel.TrustBoundaries[trustBoundaryOfTargetAsset.ParentTrustBoundaryID(parsedModel)]
	}
	return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id && trustBoundaryOfTargetAsset.Type.IsNetworkBoundary()
}

// HighestConfidentiality returns the maximum confidentiality rating over all
// data assets sent or received on this link (Public when none).
func (what CommunicationLink) HighestConfidentiality(parsedModel *ParsedModel) Confidentiality {
	highest := Public
	for _, dataId := range what.DataAssetsSent {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Confidentiality > highest {
			highest = dataAsset.Confidentiality
		}
	}
	for _, dataId := range what.DataAssetsReceived {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Confidentiality > highest {
			highest = dataAsset.Confidentiality
		}
	}
	return highest
}

// HighestIntegrity returns the maximum integrity rating over all data assets
// sent or received on this link (Archive when none).
func (what CommunicationLink) HighestIntegrity(parsedModel *ParsedModel) Criticality {
	highest := Archive
	for _, dataId := range what.DataAssetsSent {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Integrity > highest {
			highest = dataAsset.Integrity
		}
	}
	for _, dataId := range what.DataAssetsReceived {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Integrity > highest {
			highest = dataAsset.Integrity
		}
	}
	return highest
}

// HighestAvailability returns the maximum availability rating over all data
// assets sent or received on this link (Archive when none).
func (what CommunicationLink) HighestAvailability(parsedModel *ParsedModel) Criticality {
	highest := Archive
	for _, dataId := range what.DataAssetsSent {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Availability > highest {
			highest = dataAsset.Availability
		}
	}
	for _, dataId := range what.DataAssetsReceived {
		dataAsset := parsedModel.DataAssets[dataId]
		if dataAsset.Availability > highest {
			highest = dataAsset.Availability
		}
	}
	return highest
}

// DataAssetsSentSorted resolves the sent data-asset ids and returns them
// sorted by title.
func (what CommunicationLink) DataAssetsSentSorted(parsedModel *ParsedModel) []DataAsset {
	result := make([]DataAsset, 0)
	for _, assetID := range what.DataAssetsSent {
		result = append(result, parsedModel.DataAssets[assetID])
	}
	sort.Sort(byDataAssetTitleSort(result))
	return result
}

// DataAssetsReceivedSorted resolves the received data-asset ids and returns
// them sorted by title.
func (what CommunicationLink) DataAssetsReceivedSorted(parsedModel *ParsedModel) []DataAsset {
	result := make([]DataAsset, 0)
	for _, assetID := range what.DataAssetsReceived {
		result = append(result, parsedModel.DataAssets[assetID])
	}
	sort.Sort(byDataAssetTitleSort(result))
	return result
}

// IsBidirectional reports whether data flows in both directions on this link.
func (what CommunicationLink) IsBidirectional() bool {
	return len(what.DataAssetsSent) > 0 && len(what.DataAssetsReceived) > 0
}

// ByTechnicalCommunicationLinkIdSort sorts links by Id.
// NOTE(review): Less uses ">" (descending order), unusual for an id sort —
// confirm descending is intended.
type ByTechnicalCommunicationLinkIdSort []CommunicationLink

func (what ByTechnicalCommunicationLinkIdSort) Len() int      { return len(what) }
func (what ByTechnicalCommunicationLinkIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByTechnicalCommunicationLinkIdSort) Less(i, j int) bool {
	return what[i].Id > what[j].Id
}
+type ByTechnicalCommunicationLinkTitleSort []CommunicationLink + +func (what ByTechnicalCommunicationLinkTitleSort) Len() int { return len(what) } +func (what ByTechnicalCommunicationLinkTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } +func (what ByTechnicalCommunicationLinkTitleSort) Less(i, j int) bool { + return what[i].Title > what[j].Title +} diff --git a/pkg/security/types/confidentiality.go b/pkg/security/types/confidentiality.go new file mode 100644 index 00000000..fad041ea --- /dev/null +++ b/pkg/security/types/confidentiality.go @@ -0,0 +1,138 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Confidentiality int + +const ( + Public Confidentiality = iota + Internal + Restricted + Confidential + StrictlyConfidential +) + +func ConfidentialityValues() []TypeEnum { + return []TypeEnum{ + Public, + Internal, + Restricted, + Confidential, + StrictlyConfidential, + } +} + +func ParseConfidentiality(value string) (confidentiality Confidentiality, err error) { + value = strings.TrimSpace(value) + for _, candidate := range ConfidentialityValues() { + if candidate.String() == value { + return candidate.(Confidentiality), err + } + } + return confidentiality, errors.New("Unable to parse into type: " + value) +} + +var ConfidentialityTypeDescription = [...]TypeDescription{ + {"public", "Public available information"}, + {"internal", "(Company) internal information - but all people in the institution can access it"}, + {"restricted", "Internal and with restricted access"}, + {"confidential", "Only a few selected people have access"}, + {"strictly-confidential", "Highest secrecy level"}, +} + +func (what Confidentiality) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return ConfidentialityTypeDescription[what].Name +} + +func (what Confidentiality) Explain() string { + return 
ConfidentialityTypeDescription[what].Description +} + +func (what Confidentiality) AttackerAttractivenessForAsset() float64 { + // fibonacci starting at 8 + return [...]float64{8, 13, 21, 34, 55}[what] +} +func (what Confidentiality) AttackerAttractivenessForProcessedOrStoredData() float64 { + // fibonacci starting at 5 + return [...]float64{5, 8, 13, 21, 34}[what] +} +func (what Confidentiality) AttackerAttractivenessForInOutTransferredData() float64 { + // fibonacci starting at 2 + return [...]float64{2, 3, 5, 8, 13}[what] +} + +func (what Confidentiality) RatingStringInScale() string { + result := "(rated " + if what == Public { + result += "1" + } + if what == Internal { + result += "2" + } + if what == Restricted { + result += "3" + } + if what == Confidential { + result += "4" + } + if what == StrictlyConfidential { + result += "5" + } + result += " in scale of 5)" + return result +} + +func (what Confidentiality) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Confidentiality) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Confidentiality) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Confidentiality) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Confidentiality) find(value string) (Confidentiality, error) { + for index, description := range ConfidentialityTypeDescription { + if strings.EqualFold(value, description.Name) { + return Confidentiality(index), nil + } + } + + return Confidentiality(0), fmt.Errorf("unknown confidentiality value %q", value) +} diff --git a/pkg/security/types/confidentiality_test.go 
b/pkg/security/types/confidentiality_test.go new file mode 100644 index 00000000..2edacf6a --- /dev/null +++ b/pkg/security/types/confidentiality_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseConfidentialityTest struct { + input string + expected Confidentiality + expectedError error +} + +func TestParseConfidenitality(t *testing.T) { + testCases := map[string]ParseConfidentialityTest{ + "public": { + input: "public", + expected: Public, + }, + "internal": { + input: "internal", + expected: Internal, + }, + "restricted": { + input: "restricted", + expected: Restricted, + }, + "confidential": { + input: "confidential", + expected: Confidential, + }, + "strictly-confidential": { + input: "strictly-confidential", + expected: StrictlyConfidential, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseConfidentiality(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/criticality.go b/pkg/security/types/criticality.go new file mode 100644 index 00000000..a9e221b7 --- /dev/null +++ b/pkg/security/types/criticality.go @@ -0,0 +1,138 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Criticality int + +const ( + Archive Criticality = iota + Operational + Important + Critical + MissionCritical +) + +func CriticalityValues() []TypeEnum { + return []TypeEnum{ + Archive, + Operational, + Important, + Critical, + MissionCritical, + } +} + +func ParseCriticality(value string) (criticality Criticality, err error) { + value = strings.TrimSpace(value) + for _, candidate := range CriticalityValues() { + 
if candidate.String() == value { + return candidate.(Criticality), err + } + } + return criticality, errors.New("Unable to parse into type: " + value) +} + +var CriticalityTypeDescription = [...]TypeDescription{ + {"archive", "Stored, not active"}, + {"operational", "If this fails, people will just have an ad-hoc coffee break until it is back"}, + {"important", "Issues here results in angry people"}, + {"critical", "Failure is really expensive or crippling"}, + {"mission-critical", "This must not fail"}, +} + +func (what Criticality) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return CriticalityTypeDescription[what].Name +} + +func (what Criticality) Explain() string { + return CriticalityTypeDescription[what].Description +} + +func (what Criticality) AttackerAttractivenessForAsset() float64 { + // fibonacci starting at 5 + return [...]float64{5, 8, 13, 21, 34}[what] +} +func (what Criticality) AttackerAttractivenessForProcessedOrStoredData() float64 { + // fibonacci starting at 3 + return [...]float64{3, 5, 8, 13, 21}[what] +} +func (what Criticality) AttackerAttractivenessForInOutTransferredData() float64 { + // fibonacci starting at 2 + return [...]float64{2, 3, 5, 8, 13}[what] +} + +func (what Criticality) RatingStringInScale() string { + result := "(rated " + if what == Archive { + result += "1" + } + if what == Operational { + result += "2" + } + if what == Important { + result += "3" + } + if what == Critical { + result += "4" + } + if what == MissionCritical { + result += "5" + } + result += " in scale of 5)" + return result +} + +func (what Criticality) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Criticality) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + 
return nil +} + +func (what Criticality) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Criticality) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Criticality) find(value string) (Criticality, error) { + for index, description := range CriticalityTypeDescription { + if strings.EqualFold(value, description.Name) { + return Criticality(index), nil + } + } + + return Criticality(0), fmt.Errorf("unknown criticality value %q", value) +} diff --git a/pkg/security/types/criticality_test.go b/pkg/security/types/criticality_test.go new file mode 100644 index 00000000..55912084 --- /dev/null +++ b/pkg/security/types/criticality_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseCriticalityTest struct { + input string + expected Criticality + expectedError error +} + +func TestParseCriticality(t *testing.T) { + testCases := map[string]ParseCriticalityTest{ + "archive": { + input: "archive", + expected: Archive, + }, + "operational": { + input: "operational", + expected: Operational, + }, + "important": { + input: "important", + expected: Important, + }, + "critical": { + input: "critical", + expected: Critical, + }, + "mission-critical": { + input: "mission-critical", + expected: MissionCritical, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseCriticality(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/data_asset.go b/pkg/security/types/data_asset.go new file mode 100644 index 00000000..7fe1c46d --- /dev/null +++ 
// ---- file: pkg/security/types/data_asset.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"sort"
)

// DataAsset models one piece of information in the threat model, with its
// CIA ratings and ownership metadata.
type DataAsset struct {
	Id                     string          `yaml:"id,omitempty" json:"id,omitempty"`                   // TODO: tag here still required?
	Title                  string          `yaml:"title,omitempty" json:"title,omitempty"`             // TODO: tag here still required?
	Description            string          `yaml:"description,omitempty" json:"description,omitempty"` // TODO: tag here still required?
	Usage                  Usage           `yaml:"usage,omitempty" json:"usage,omitempty"`
	Tags                   []string        `yaml:"tags,omitempty" json:"tags,omitempty"`
	Origin                 string          `yaml:"origin,omitempty" json:"origin,omitempty"`
	Owner                  string          `yaml:"owner,omitempty" json:"owner,omitempty"`
	Quantity               Quantity        `yaml:"quantity,omitempty" json:"quantity,omitempty"`
	Confidentiality        Confidentiality `yaml:"confidentiality,omitempty" json:"confidentiality,omitempty"`
	Integrity              Criticality     `yaml:"integrity,omitempty" json:"integrity,omitempty"`
	Availability           Criticality     `yaml:"availability,omitempty" json:"availability,omitempty"`
	JustificationCiaRating string          `yaml:"justification_cia_rating,omitempty" json:"justification_cia_rating,omitempty"`
}

// IsTaggedWithAny reports whether any of the given tags is present
// (case-insensitively) on this data asset.
func (what DataAsset) IsTaggedWithAny(tags ...string) bool {
	return containsCaseInsensitiveAny(what.Tags, tags...)
}

// IsTaggedWithBaseTag reports whether the asset carries the base tag or any
// tag derived from it (semantics defined by the shared helper).
func (what DataAsset) IsTaggedWithBaseTag(baseTag string) bool {
	return IsTaggedWithBaseTag(what.Tags, baseTag)
}

/*
func (what DataAsset) IsAtRisk() bool {
	for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
		if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
			return true
		}
	}
	for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
		if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
			return true
		}
	}
	return false
}
*/

/*
func (what DataAsset) IdentifiedRiskSeverityStillAtRisk() RiskSeverity {
	highestRiskSeverity := Low
	for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
		candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
		if candidateSeverity > highestRiskSeverity {
			highestRiskSeverity = candidateSeverity
		}
	}
	for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
		candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
		if candidateSeverity > highestRiskSeverity {
			highestRiskSeverity = candidateSeverity
		}
	}
	return highestRiskSeverity
}
*/

// IdentifiedRisksByResponsibleTechnicalAssetId groups the generated risks of
// every technical asset that processes or stores this data asset, keyed by
// that technical asset's id. Only assets with at least one risk appear.
func (what DataAsset) IdentifiedRisksByResponsibleTechnicalAssetId(model *ParsedModel) map[string][]Risk {
	uniqueTechAssetIDsResponsibleForThisDataAsset := make(map[string]interface{})
	for _, techAsset := range what.ProcessedByTechnicalAssetsSorted(model) {
		if len(techAsset.GeneratedRisks(model)) > 0 {
			uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = true
		}
	}
	for _, techAsset := range what.StoredByTechnicalAssetsSorted(model) {
		if len(techAsset.GeneratedRisks(model)) > 0 {
			uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = true
		}
	}

	result := make(map[string][]Risk)
	for techAssetId := range uniqueTechAssetIDsResponsibleForThisDataAsset {
		result[techAssetId] = append(result[techAssetId], model.TechnicalAssets[techAssetId].GeneratedRisks(model)...)
	}
	return result
}

// IsDataBreachPotentialStillAtRisk reports whether any still-at-risk risk
// names a data-breach technical asset that processes this data asset.
func (what DataAsset) IsDataBreachPotentialStillAtRisk(parsedModel *ParsedModel) bool {
	for _, risk := range FilteredByStillAtRisk(parsedModel) {
		for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
			if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
				return true
			}
		}
	}
	return false
}

// IdentifiedDataBreachProbability returns the highest breach probability over
// all risks whose data-breach assets process this data asset (Improbable when
// none match). The inner break only skips the remaining breach assets of the
// risk whose probability was just taken.
func (what DataAsset) IdentifiedDataBreachProbability(parsedModel *ParsedModel) DataBreachProbability {
	highestProbability := Improbable
	for _, risk := range AllRisks(parsedModel) {
		for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
			if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
				if risk.DataBreachProbability > highestProbability {
					highestProbability = risk.DataBreachProbability
					break
				}
			}
		}
	}
	return highestProbability
}

// IdentifiedDataBreachProbabilityStillAtRisk is the same maximum as above but
// restricted to risks that are still at risk.
func (what DataAsset) IdentifiedDataBreachProbabilityStillAtRisk(parsedModel *ParsedModel) DataBreachProbability {
	highestProbability := Improbable
	for _, risk := range FilteredByStillAtRisk(parsedModel) {
		for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
			if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
				if risk.DataBreachProbability > highestProbability {
					highestProbability = risk.DataBreachProbability
					break
				}
			}
		}
	}
	return highestProbability
}

// IdentifiedDataBreachProbabilityRisksStillAtRisk collects the still-at-risk
// risks whose data-breach assets process this data asset (each risk at most
// once, thanks to the break).
func (what DataAsset) IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel *ParsedModel) []Risk {
	result := make([]Risk, 0)
	for _, risk := range FilteredByStillAtRisk(parsedModel) {
		for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
			if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
				result = append(result, risk)
				break
			}
		}
	}
	return result
}

// IdentifiedDataBreachProbabilityRisks is like the function above but over
// all risks, not just the still-at-risk ones.
func (what DataAsset) IdentifiedDataBreachProbabilityRisks(parsedModel *ParsedModel) []Risk {
	result := make([]Risk, 0)
	for _, risk := range AllRisks(parsedModel) {
		for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
			if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
				result = append(result, risk)
				break
			}
		}
	}
	return result
}

// ProcessedByTechnicalAssetsSorted returns all technical assets that process
// this data asset, sorted by title.
func (what DataAsset) ProcessedByTechnicalAssetsSorted(parsedModel *ParsedModel) []TechnicalAsset {
	result := make([]TechnicalAsset, 0)
	for _, technicalAsset := range parsedModel.TechnicalAssets {
		for _, candidateID := range technicalAsset.DataAssetsProcessed {
			if candidateID == what.Id {
				result = append(result, technicalAsset)
			}
		}
	}
	sort.Sort(ByTechnicalAssetTitleSort(result))
	return result
}

// StoredByTechnicalAssetsSorted returns all technical assets that store this
// data asset, sorted by title.
func (what DataAsset) StoredByTechnicalAssetsSorted(parsedModel *ParsedModel) []TechnicalAsset {
	result := make([]TechnicalAsset, 0)
	for _, technicalAsset := range parsedModel.TechnicalAssets {
		for _, candidateID := range technicalAsset.DataAssetsStored {
			if candidateID == what.Id {
				result = append(result, technicalAsset)
			}
		}
	}
	sort.Sort(ByTechnicalAssetTitleSort(result))
	return result
}

// SentViaCommLinksSorted returns all communication links that send this data
// asset, sorted by link title.
func (what DataAsset) SentViaCommLinksSorted(parsedModel *ParsedModel) []CommunicationLink {
	result := make([]CommunicationLink, 0)
	for _, technicalAsset := range parsedModel.TechnicalAssets {
		for _, commLink := range technicalAsset.CommunicationLinks {
			for _, candidateID := range commLink.DataAssetsSent {
				if candidateID == what.Id {
					result = append(result, commLink)
				}
			}
		}
	}
	sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
	return result
}

// ReceivedViaCommLinksSorted returns all communication links that receive
// this data asset, sorted by link title.
func (what DataAsset) ReceivedViaCommLinksSorted(parsedModel *ParsedModel) []CommunicationLink {
	result := make([]CommunicationLink, 0)
	for _, technicalAsset := range parsedModel.TechnicalAssets {
		for _, commLink := range technicalAsset.CommunicationLinks {
			for _, candidateID := range commLink.DataAssetsReceived {
				if candidateID == what.Id {
					result = append(result, commLink)
				}
			}
		}
	}
	sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
	return result
}

// SortByDataAssetDataBreachProbabilityAndTitle sorts in place: highest breach
// probability first, ties broken by ascending title.
func SortByDataAssetDataBreachProbabilityAndTitle(parsedModel *ParsedModel, assets []DataAsset) {
	sort.Slice(assets, func(i, j int) bool {
		highestDataBreachProbabilityLeft := assets[i].IdentifiedDataBreachProbability(parsedModel)
		highestDataBreachProbabilityRight := assets[j].IdentifiedDataBreachProbability(parsedModel)
		if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight {
			return assets[i].Title < assets[j].Title
		}
		return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight
	})
}

// SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk sorts in place by
// still-at-risk breach probability (highest first); among equal probabilities
// assets that still have open risks come before those with none, then by
// ascending title.
func SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk(parsedModel *ParsedModel, assets []DataAsset) {
	sort.Slice(assets, func(i, j int) bool {
		risksLeft := assets[i].IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel)
		risksRight := assets[j].IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel)
		highestDataBreachProbabilityLeft := assets[i].IdentifiedDataBreachProbabilityStillAtRisk(parsedModel)
		highestDataBreachProbabilityRight := assets[j].IdentifiedDataBreachProbabilityStillAtRisk(parsedModel)
		if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight {
			if len(risksLeft) == 0 && len(risksRight) > 0 {
				return false
			}
			if len(risksLeft) > 0 && len(risksRight) == 0 {
				return true
			}
			return assets[i].Title < assets[j].Title
		}
		return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight
	})
}

// ByDataAssetTitleSort sorts data assets by ascending title.
type ByDataAssetTitleSort []DataAsset

func (what ByDataAssetTitleSort) Len() int      { return len(what) }
func (what ByDataAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByDataAssetTitleSort) Less(i, j int) bool {
	return what[i].Title < what[j].Title
}
// ---- file: pkg/security/types/data_breach_probability.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// DataBreachProbability rates how likely a risk leads to a data breach.
type DataBreachProbability int

const (
	Improbable DataBreachProbability = iota
	Possible
	Probable
)

// DataBreachProbabilityValues lists all enum values in ascending order.
func DataBreachProbabilityValues() []TypeEnum {
	return []TypeEnum{
		Improbable,
		Possible,
		Probable,
	}
}

// DataBreachProbabilityTypeDescription maps each enum value (by index) to
// its canonical name and a short explanation.
var DataBreachProbabilityTypeDescription = [...]TypeDescription{
	{"improbable", "Improbable"},
	{"possible", "Possible"},
	{"probable", "Probable"},
}

// ParseDataBreachProbability converts a whitespace-trimmed name into its enum
// value. Unlike the sibling parsers, an empty input defaults to Possible;
// unknown names produce an error.
func ParseDataBreachProbability(value string) (dataBreachProbability DataBreachProbability, err error) {
	value = strings.TrimSpace(value)
	if value == "" {
		return Possible, err
	}

	for _, candidate := range DataBreachProbabilityValues() {
		if candidate.String() == value {
			return candidate.(DataBreachProbability), err
		}
	}
	return dataBreachProbability, errors.New("Unable to parse into type: " + value)
}

// String returns the canonical (lowercase) name of the value.
func (what DataBreachProbability) String() string {
	// NOTE: maintain list also in schema.json for validation in IDEs
	return DataBreachProbabilityTypeDescription[what].Name
}

// Explain returns the human-readable description of the value.
func (what DataBreachProbability) Explain() string {
	return DataBreachProbabilityTypeDescription[what].Description
}

// Title returns the capitalized display name.
func (what DataBreachProbability) Title() string {
	return [...]string{"Improbable", "Possible", "Probable"}[what]
}

// MarshalJSON serializes the enum as its canonical name string.
func (what DataBreachProbability) MarshalJSON() ([]byte, error) {
	return json.Marshal(what.String())
}

// UnmarshalJSON parses a JSON string (case-insensitively, via find) back
// into the enum value.
func (what *DataBreachProbability) UnmarshalJSON(data []byte) error {
	var text string
	unmarshalError := json.Unmarshal(data, &text)
	if unmarshalError != nil {
		return unmarshalError
	}

	value, findError := what.find(text)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}

// MarshalYAML serializes the enum as its canonical name string.
func (what DataBreachProbability) MarshalYAML() (interface{}, error) {
	return what.String(), nil
}

// UnmarshalYAML parses the node's scalar value (case-insensitively, via
// find). NOTE(review): unlike ParseDataBreachProbability, an empty scalar
// does NOT default to Possible here — confirm that asymmetry is intended.
func (what *DataBreachProbability) UnmarshalYAML(node *yaml.Node) error {
	value, findError := what.find(node.Value)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}

// find performs a case-insensitive lookup in the description table; the
// table index is the enum value.
func (what DataBreachProbability) find(value string) (DataBreachProbability, error) {
	for index, description := range DataBreachProbabilityTypeDescription {
		if strings.EqualFold(value, description.Name) {
			return DataBreachProbability(index), nil
		}
	}

	return DataBreachProbability(0), fmt.Errorf("unknown data breach probability value %q", value)
}

// ---- file: pkg/security/types/data_breach_probability_test.go ----

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseDataBreachProbabilityTest pairs one raw input with the expected parse
// result (value or error) for the table-driven test below.
type ParseDataBreachProbabilityTest struct {
	input         string
	expected      DataBreachProbability
	expectedError error
}

// TestParseDataBreachProbability verifies the canonical names, the
// empty-input default of Possible, and the unknown-name error.
func TestParseDataBreachProbability(t *testing.T) {
	testCases := map[string]ParseDataBreachProbabilityTest{
		"improbable": {
			input:    "improbable",
			expected: Improbable,
		},
		"possible": {
			input:    "possible",
			expected: Possible,
		},
		"probable": {
			input:    "probable",
			expected: Probable,
		},
		"default": {
			input:    "",
			expected: Possible,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseDataBreachProbability(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}
2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type DataFormat int + +const ( + JSON DataFormat = iota + XML + Serialization + File + CSV +) + +func DataFormatValues() []TypeEnum { + return []TypeEnum{ + JSON, + XML, + Serialization, + File, + CSV, + } +} + +var DataFormatTypeDescription = [...]TypeDescription{ + {"json", "JSON"}, + {"xml", "XML"}, + {"serialization", "Serialized program objects"}, + {"file", "Specific file types for data"}, + {"csv", "CSV"}, +} + +func ParseDataFormat(value string) (dataFormat DataFormat, err error) { + value = strings.TrimSpace(value) + for _, candidate := range DataFormatValues() { + if candidate.String() == value { + return candidate.(DataFormat), err + } + } + return dataFormat, errors.New("Unable to parse into type: " + value) +} + +func (what DataFormat) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return DataFormatTypeDescription[what].Name +} + +func (what DataFormat) Explain() string { + return DataFormatTypeDescription[what].Description +} + +func (what DataFormat) Title() string { + return [...]string{"JSON", "XML", "Serialization", "File", "CSV"}[what] +} + +func (what DataFormat) Description() string { + return [...]string{"JSON marshalled object data", "XML structured data", "Serialization-based object graphs", + "File input/uploads", "CSV tabular data"}[what] +} + +func (what DataFormat) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *DataFormat) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what DataFormat) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *DataFormat) UnmarshalYAML(node 
*yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what DataFormat) find(value string) (DataFormat, error) { + for index, description := range DataFormatTypeDescription { + if strings.EqualFold(value, description.Name) { + return DataFormat(index), nil + } + } + + return DataFormat(0), fmt.Errorf("unknown data format value %q", value) +} + +type ByDataFormatAcceptedSort []DataFormat + +func (what ByDataFormatAcceptedSort) Len() int { return len(what) } +func (what ByDataFormatAcceptedSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } +func (what ByDataFormatAcceptedSort) Less(i, j int) bool { + return what[i].String() < what[j].String() +} diff --git a/pkg/security/types/data_format_test.go b/pkg/security/types/data_format_test.go new file mode 100644 index 00000000..df2ade98 --- /dev/null +++ b/pkg/security/types/data_format_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseDataFormatTest struct { + input string + expected DataFormat + expectedError error +} + +func TestParseDataFormat(t *testing.T) { + testCases := map[string]ParseDataFormatTest{ + "json": { + input: "json", + expected: JSON, + }, + "xml": { + input: "xml", + expected: XML, + }, + "serialization": { + input: "serialization", + expected: Serialization, + }, + "file": { + input: "file", + expected: File, + }, + "csv": { + input: "csv", + expected: CSV, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseDataFormat(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/date.go b/pkg/security/types/date.go new 
file mode 100644 index 00000000..d800963f --- /dev/null +++ b/pkg/security/types/date.go @@ -0,0 +1,40 @@ +package types + +import ( + "gopkg.in/yaml.v3" + "time" +) + +type Date struct { + time.Time +} + +func (what Date) MarshalJSON() ([]byte, error) { + return []byte(what.Format(`"2006-01-02"`)), nil +} + +func (what *Date) UnmarshalJSON(bytes []byte) error { + date, parseError := time.Parse(`"2006-01-02"`, string(bytes)) + if parseError != nil { + return parseError + } + + what.Time = date + + return nil +} + +func (what Date) MarshalYAML() (interface{}, error) { + return what.Format(`2006-01-02`), nil +} + +func (what *Date) UnmarshalYAML(node *yaml.Node) error { + date, parseError := time.Parse(`2006-01-02`, node.Value) + if parseError != nil { + return parseError + } + + what.Time = date + + return nil +} diff --git a/pkg/security/types/encryption_style.go b/pkg/security/types/encryption_style.go new file mode 100644 index 00000000..32afa9f2 --- /dev/null +++ b/pkg/security/types/encryption_style.go @@ -0,0 +1,108 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type EncryptionStyle int + +const ( + NoneEncryption EncryptionStyle = iota + Transparent + DataWithSymmetricSharedKey + DataWithAsymmetricSharedKey + DataWithEndUserIndividualKey +) + +func EncryptionStyleValues() []TypeEnum { + return []TypeEnum{ + NoneEncryption, + Transparent, + DataWithSymmetricSharedKey, + DataWithAsymmetricSharedKey, + DataWithEndUserIndividualKey, + } +} + +func ParseEncryptionStyle(value string) (encryptionStyle EncryptionStyle, err error) { + value = strings.TrimSpace(value) + for _, candidate := range EncryptionStyleValues() { + if candidate.String() == value { + return candidate.(EncryptionStyle), err + } + } + return encryptionStyle, errors.New("Unable to parse into type: " + value) +} + +var EncryptionStyleTypeDescription = [...]TypeDescription{ + {"none", "No encryption"}, + 
{"transparent", "Encrypted data at rest"}, + {"data-with-symmetric-shared-key", "Both communication partners have the same key. This must be kept secret"}, + {"data-with-asymmetric-shared-key", "The key is split into public and private. Those two are shared between partners"}, + {"data-with-enduser-individual-key", "The key is (managed) by the end user"}, +} + +func (what EncryptionStyle) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return EncryptionStyleTypeDescription[what].Name +} + +func (what EncryptionStyle) Explain() string { + return EncryptionStyleTypeDescription[what].Description +} + +func (what EncryptionStyle) Title() string { + return [...]string{"None", "Transparent", "Data with Symmetric Shared Key", "Data with Asymmetric Shared Key", "Data with End-User Individual Key"}[what] +} + +func (what EncryptionStyle) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *EncryptionStyle) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what EncryptionStyle) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *EncryptionStyle) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what EncryptionStyle) find(value string) (EncryptionStyle, error) { + for index, description := range EncryptionStyleTypeDescription { + if strings.EqualFold(value, description.Name) { + return EncryptionStyle(index), nil + } + } + + return EncryptionStyle(0), fmt.Errorf("unknown encryption style value %q", value) +} diff --git a/pkg/security/types/encryption_style_test.go b/pkg/security/types/encryption_style_test.go new file 
mode 100644 index 00000000..36bdf560 --- /dev/null +++ b/pkg/security/types/encryption_style_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseEncryptionStyleTest struct { + input string + expected EncryptionStyle + expectedError error +} + +func TestParseEncryptionStyle(t *testing.T) { + testCases := map[string]ParseEncryptionStyleTest{ + "none": { + input: "none", + expected: NoneEncryption, + }, + "transparent": { + input: "transparent", + expected: Transparent, + }, + "data-with-symmetric-shared-key": { + input: "data-with-symmetric-shared-key", + expected: DataWithSymmetricSharedKey, + }, + "data-with-asymmetric-shared-key": { + input: "data-with-asymmetric-shared-key", + expected: DataWithAsymmetricSharedKey, + }, + "data-with-enduser-individual-key": { + input: "data-with-enduser-individual-key", + expected: DataWithEndUserIndividualKey, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseEncryptionStyle(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/helpers.go b/pkg/security/types/helpers.go new file mode 100644 index 00000000..0090195e --- /dev/null +++ b/pkg/security/types/helpers.go @@ -0,0 +1,54 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "regexp" + "strings" +) + +func MakeID(val string) string { + reg, _ := regexp.Compile("[^A-Za-z0-9]+") + return strings.Trim(reg.ReplaceAllString(strings.ToLower(val), "-"), "- ") +} + +func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} + +func containsCaseInsensitiveAny(a []string, x ...string) bool { + for _, n := range a { + for _, c := 
range x { + if strings.TrimSpace(strings.ToLower(c)) == strings.TrimSpace(strings.ToLower(n)) { + return true + } + } + } + return false +} + +func IsTaggedWithBaseTag(tags []string, baseTag string) bool { // base tags are before the colon ":" like in "aws:ec2" it's "aws". The subtag is after the colon. Also, a pure "aws" tag matches the base tag "aws" + baseTag = strings.ToLower(strings.TrimSpace(baseTag)) + for _, tag := range tags { + tag = strings.ToLower(strings.TrimSpace(tag)) + if tag == baseTag || strings.HasPrefix(tag, baseTag+":") { + return true + } + } + return false +} + +type byDataAssetTitleSort []DataAsset + +func (what byDataAssetTitleSort) Len() int { return len(what) } +func (what byDataAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } +func (what byDataAssetTitleSort) Less(i, j int) bool { + return what[i].Title < what[j].Title +} diff --git a/pkg/security/types/model.go b/pkg/security/types/model.go new file mode 100644 index 00000000..97128f7a --- /dev/null +++ b/pkg/security/types/model.go @@ -0,0 +1,427 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "fmt" + "github.com/threagile/threagile/pkg/input" + "regexp" + "slices" + "sort" + "strings" +) + +// TODO: move model out of types package and +// rename parsedModel to model or something like this to emphasize that it's just a model +// maybe +type ParsedModel struct { + ThreagileVersion string `yaml:"threagile_version,omitempty" json:"threagile_version,omitempty"` + Includes []string `yaml:"includes,omitempty" json:"includes,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Author input.Author `json:"author,omitempty" yaml:"author,omitempty"` + Contributors []input.Author `yaml:"contributors,omitempty" json:"contributors,omitempty"` + Date Date `json:"date,omitempty" yaml:"date,omitempty"` + AppDescription input.Overview `yaml:"application_description,omitempty" json:"application_description,omitempty"` + 
BusinessOverview input.Overview `json:"business_overview,omitempty" yaml:"business_overview,omitempty"` + TechnicalOverview input.Overview `json:"technical_overview,omitempty" yaml:"technical_overview,omitempty"` + BusinessCriticality Criticality `json:"business_criticality,omitempty" yaml:"business_criticality,omitempty"` + ManagementSummaryComment string `json:"management_summary_comment,omitempty" yaml:"management_summary_comment,omitempty"` + SecurityRequirements map[string]string `json:"security_requirements,omitempty" yaml:"security_requirements,omitempty"` + Questions map[string]string `json:"questions,omitempty" yaml:"questions,omitempty"` + AbuseCases map[string]string `json:"abuse_cases,omitempty" yaml:"abuse_cases,omitempty"` + TagsAvailable []string `json:"tags_available,omitempty" yaml:"tags_available,omitempty"` + DataAssets map[string]DataAsset `json:"data_assets,omitempty" yaml:"data_assets,omitempty"` + TechnicalAssets map[string]TechnicalAsset `json:"technical_assets,omitempty" yaml:"technical_assets,omitempty"` + TrustBoundaries map[string]TrustBoundary `json:"trust_boundaries,omitempty" yaml:"trust_boundaries,omitempty"` + SharedRuntimes map[string]SharedRuntime `json:"shared_runtimes,omitempty" yaml:"shared_runtimes,omitempty"` + IndividualRiskCategories map[string]RiskCategory `json:"individual_risk_categories,omitempty" yaml:"individual_risk_categories,omitempty"` + BuiltInRiskCategories map[string]RiskCategory `json:"built_in_risk_categories,omitempty" yaml:"built_in_risk_categories,omitempty"` + RiskTracking map[string]RiskTracking `json:"risk_tracking,omitempty" yaml:"risk_tracking,omitempty"` + CommunicationLinks map[string]CommunicationLink `json:"communication_links,omitempty" yaml:"communication_links,omitempty"` + AllSupportedTags map[string]bool `json:"all_supported_tags,omitempty" yaml:"all_supported_tags,omitempty"` + DiagramTweakNodesep int `json:"diagram_tweak_nodesep,omitempty" yaml:"diagram_tweak_nodesep,omitempty"` + 
DiagramTweakRanksep int `json:"diagram_tweak_ranksep,omitempty" yaml:"diagram_tweak_ranksep,omitempty"` + DiagramTweakEdgeLayout string `json:"diagram_tweak_edge_layout,omitempty" yaml:"diagram_tweak_edge_layout,omitempty"` + DiagramTweakSuppressEdgeLabels bool `json:"diagram_tweak_suppress_edge_labels,omitempty" yaml:"diagram_tweak_suppress_edge_labels,omitempty"` + DiagramTweakLayoutLeftToRight bool `json:"diagram_tweak_layout_left_to_right,omitempty" yaml:"diagram_tweak_layout_left_to_right,omitempty"` + DiagramTweakInvisibleConnectionsBetweenAssets []string `json:"diagram_tweak_invisible_connections_between_assets,omitempty" yaml:"diagram_tweak_invisible_connections_between_assets,omitempty"` + DiagramTweakSameRankAssets []string `json:"diagram_tweak_same_rank_assets,omitempty" yaml:"diagram_tweak_same_rank_assets,omitempty"` + + // TODO: those are generated based on items above and needs to be private + IncomingTechnicalCommunicationLinksMappedByTargetId map[string][]CommunicationLink `json:"incoming_technical_communication_links_mapped_by_target_id,omitempty" yaml:"incoming_technical_communication_links_mapped_by_target_id,omitempty"` + DirectContainingTrustBoundaryMappedByTechnicalAssetId map[string]TrustBoundary `json:"direct_containing_trust_boundary_mapped_by_technical_asset_id,omitempty" yaml:"direct_containing_trust_boundary_mapped_by_technical_asset_id,omitempty"` + GeneratedRisksByCategory map[string][]Risk `json:"generated_risks_by_category,omitempty" yaml:"generated_risks_by_category,omitempty"` + GeneratedRisksBySyntheticId map[string]Risk `json:"generated_risks_by_synthetic_id,omitempty" yaml:"generated_risks_by_synthetic_id,omitempty"` +} + +func (parsedModel *ParsedModel) AddToListOfSupportedTags(tags []string) { + for _, tag := range tags { + parsedModel.AllSupportedTags[tag] = true + } +} + +func (parsedModel *ParsedModel) GetDeferredRiskTrackingDueToWildcardMatching() map[string]RiskTracking { + deferredRiskTrackingDueToWildcardMatching := 
make(map[string]RiskTracking) + for syntheticRiskId, riskTracking := range parsedModel.RiskTracking { + if strings.Contains(syntheticRiskId, "*") { // contains a wildcard char + deferredRiskTrackingDueToWildcardMatching[syntheticRiskId] = riskTracking + } + } + + return deferredRiskTrackingDueToWildcardMatching +} + +func (parsedModel *ParsedModel) HasNotYetAnyDirectNonWildcardRiskTracking(syntheticRiskId string) bool { + if _, ok := parsedModel.RiskTracking[syntheticRiskId]; ok { + return false + } + return true +} + +func (parsedModel *ParsedModel) CheckTags(tags []string, where string) ([]string, error) { + var tagsUsed = make([]string, 0) + if tags != nil { + tagsUsed = make([]string, len(tags)) + for i, parsedEntry := range tags { + referencedTag := fmt.Sprintf("%v", parsedEntry) + err := parsedModel.CheckTagExists(referencedTag, where) + if err != nil { + return nil, err + } + tagsUsed[i] = referencedTag + } + } + return tagsUsed, nil +} + +func (parsedModel *ParsedModel) ApplyWildcardRiskTrackingEvaluation(ignoreOrphanedRiskTracking bool, progressReporter progressReporter) error { + progressReporter.Info("Executing risk tracking evaluation") + for syntheticRiskIdPattern, riskTracking := range parsedModel.GetDeferredRiskTrackingDueToWildcardMatching() { + progressReporter.Info("Applying wildcard risk tracking for risk id: " + syntheticRiskIdPattern) + + foundSome := false + var matchingRiskIdExpression = regexp.MustCompile(strings.ReplaceAll(regexp.QuoteMeta(syntheticRiskIdPattern), `\*`, `[^@]+`)) + for syntheticRiskId := range parsedModel.GeneratedRisksBySyntheticId { + if matchingRiskIdExpression.Match([]byte(syntheticRiskId)) && parsedModel.HasNotYetAnyDirectNonWildcardRiskTracking(syntheticRiskId) { + foundSome = true + parsedModel.RiskTracking[syntheticRiskId] = RiskTracking{ + SyntheticRiskId: strings.TrimSpace(syntheticRiskId), + Justification: riskTracking.Justification, + CheckedBy: riskTracking.CheckedBy, + Ticket: riskTracking.Ticket, + Status: 
riskTracking.Status, + Date: riskTracking.Date, + } + } + } + + if !foundSome { + if ignoreOrphanedRiskTracking { + progressReporter.Warn("WARNING: Wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern) + } else { + return errors.New("wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern) + } + } + } + return nil +} + +func (parsedModel *ParsedModel) CheckRiskTracking(ignoreOrphanedRiskTracking bool, progressReporter progressReporter) error { + progressReporter.Info("Checking risk tracking") + for _, tracking := range parsedModel.RiskTracking { + if _, ok := parsedModel.GeneratedRisksBySyntheticId[tracking.SyntheticRiskId]; !ok { + if ignoreOrphanedRiskTracking { + progressReporter.Info("Risk tracking references unknown risk (risk id not found): " + tracking.SyntheticRiskId) + } else { + return errors.New("Risk tracking references unknown risk (risk id not found) - you might want to use the option -ignore-orphaned-risk-tracking: " + tracking.SyntheticRiskId + + "\n\nNOTE: For risk tracking each risk-id needs to be defined (the string with the @ sign in it). " + + "These unique risk IDs are visible in the PDF report (the small grey string under each risk), " + + "the Excel (column \"ID\"), as well as the JSON responses. Some risk IDs have only one @ sign in them, " + + "while others multiple. The idea is to allow for unique but still speaking IDs. Therefore each risk instance " + + "creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. " + + "Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. " + + "Best is to lookup the IDs to use in the created Excel file. 
Alternatively a model macro \"seed-risk-tracking\" " + + "is available that helps in initially seeding the risk tracking part here based on already identified and not yet handled risks.") + } + } + } + + // save also the risk-category-id and risk-status directly in the risk for better JSON marshalling + for category := range parsedModel.GeneratedRisksByCategory { + for i := range parsedModel.GeneratedRisksByCategory[category] { + // context.parsedModel.GeneratedRisksByCategory[category][i].CategoryId = category + parsedModel.GeneratedRisksByCategory[category][i].RiskStatus = parsedModel.GeneratedRisksByCategory[category][i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel) + } + } + return nil +} + +func (parsedModel *ParsedModel) CheckTagExists(referencedTag, where string) error { + if !slices.Contains(parsedModel.TagsAvailable, referencedTag) { + return errors.New("missing referenced tag in overall tag list at " + where + ": " + referencedTag) + } + return nil +} + +func (parsedModel *ParsedModel) CheckDataAssetTargetExists(referencedAsset, where string) error { + if _, ok := parsedModel.DataAssets[referencedAsset]; !ok { + return errors.New("missing referenced data asset target at " + where + ": " + referencedAsset) + } + return nil +} + +func (parsedModel *ParsedModel) CheckTrustBoundaryExists(referencedId, where string) error { + if _, ok := parsedModel.TrustBoundaries[referencedId]; !ok { + return errors.New("missing referenced trust boundary at " + where + ": " + referencedId) + } + return nil +} + +func (parsedModel *ParsedModel) CheckSharedRuntimeExists(referencedId, where string) error { + if _, ok := parsedModel.SharedRuntimes[referencedId]; !ok { + return errors.New("missing referenced shared runtime at " + where + ": " + referencedId) + } + return nil +} + +func (parsedModel *ParsedModel) CheckCommunicationLinkExists(referencedId, where string) error { + if _, ok := parsedModel.CommunicationLinks[referencedId]; !ok { + return errors.New("missing 
referenced communication link at " + where + ": " + referencedId) + } + return nil +} + +func (parsedModel *ParsedModel) CheckTechnicalAssetExists(referencedAsset, where string, onlyForTweak bool) error { + if _, ok := parsedModel.TechnicalAssets[referencedAsset]; !ok { + suffix := "" + if onlyForTweak { + suffix = " (only referenced in diagram tweak)" + } + return errors.New("missing referenced technical asset target" + suffix + " at " + where + ": " + referencedAsset) + } + return nil +} + +func (parsedModel *ParsedModel) CheckNestedTrustBoundariesExisting() error { + for _, trustBoundary := range parsedModel.TrustBoundaries { + for _, nestedId := range trustBoundary.TrustBoundariesNested { + if _, ok := parsedModel.TrustBoundaries[nestedId]; !ok { + return errors.New("missing referenced nested trust boundary: " + nestedId) + } + } + } + return nil +} + +func CalculateSeverity(likelihood RiskExploitationLikelihood, impact RiskExploitationImpact) RiskSeverity { + result := likelihood.Weight() * impact.Weight() + if result <= 1 { + return LowSeverity + } + if result <= 3 { + return MediumSeverity + } + if result <= 8 { + return ElevatedSeverity + } + if result <= 12 { + return HighSeverity + } + return CriticalSeverity +} + +func (parsedModel *ParsedModel) InScopeTechnicalAssets() []TechnicalAsset { + result := make([]TechnicalAsset, 0) + for _, asset := range parsedModel.TechnicalAssets { + if !asset.OutOfScope { + result = append(result, asset) + } + } + return result +} + +func (parsedModel *ParsedModel) SortedTechnicalAssetIDs() []string { + res := make([]string, 0) + for id := range parsedModel.TechnicalAssets { + res = append(res, id) + } + sort.Strings(res) + return res +} + +func (parsedModel *ParsedModel) TagsActuallyUsed() []string { + result := make([]string, 0) + for _, tag := range parsedModel.TagsAvailable { + if len(parsedModel.TechnicalAssetsTaggedWithAny(tag)) > 0 || + len(parsedModel.CommunicationLinksTaggedWithAny(tag)) > 0 || + 
len(parsedModel.DataAssetsTaggedWithAny(tag)) > 0 || + len(parsedModel.TrustBoundariesTaggedWithAny(tag)) > 0 || + len(parsedModel.SharedRuntimesTaggedWithAny(tag)) > 0 { + result = append(result, tag) + } + } + return result +} + +func (parsedModel *ParsedModel) TechnicalAssetsTaggedWithAny(tags ...string) []TechnicalAsset { + result := make([]TechnicalAsset, 0) + for _, candidate := range parsedModel.TechnicalAssets { + if candidate.IsTaggedWithAny(tags...) { + result = append(result, candidate) + } + } + return result +} + +func (parsedModel *ParsedModel) CommunicationLinksTaggedWithAny(tags ...string) []CommunicationLink { + result := make([]CommunicationLink, 0) + for _, asset := range parsedModel.TechnicalAssets { + for _, candidate := range asset.CommunicationLinks { + if candidate.IsTaggedWithAny(tags...) { + result = append(result, candidate) + } + } + } + return result +} + +func (parsedModel *ParsedModel) DataAssetsTaggedWithAny(tags ...string) []DataAsset { + result := make([]DataAsset, 0) + for _, candidate := range parsedModel.DataAssets { + if candidate.IsTaggedWithAny(tags...) { + result = append(result, candidate) + } + } + return result +} + +func (parsedModel *ParsedModel) TrustBoundariesTaggedWithAny(tags ...string) []TrustBoundary { + result := make([]TrustBoundary, 0) + for _, candidate := range parsedModel.TrustBoundaries { + if candidate.IsTaggedWithAny(tags...) { + result = append(result, candidate) + } + } + return result +} + +func (parsedModel *ParsedModel) SharedRuntimesTaggedWithAny(tags ...string) []SharedRuntime { + result := make([]SharedRuntime, 0) + for _, candidate := range parsedModel.SharedRuntimes { + if candidate.IsTaggedWithAny(tags...) 
{ + result = append(result, candidate) + } + } + return result +} + +func (parsedModel *ParsedModel) OutOfScopeTechnicalAssets() []TechnicalAsset { + assets := make([]TechnicalAsset, 0) + for _, asset := range parsedModel.TechnicalAssets { + if asset.OutOfScope { + assets = append(assets, asset) + } + } + sort.Sort(ByTechnicalAssetTitleSort(assets)) + return assets +} + +func (parsedModel *ParsedModel) RisksOfOnlySTRIDEInformationDisclosure(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == InformationDisclosure { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlySTRIDEDenialOfService(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == DenialOfService { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlySTRIDEElevationOfPrivilege(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == ElevationOfPrivilege { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlyBusinessSide(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := 
GetRiskCategory(parsedModel, categoryId) + if category.Function == BusinessSide { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlyArchitecture(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Architecture { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlyDevelopment(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Development { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func (parsedModel *ParsedModel) RisksOfOnlyOperation(risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, categoryRisks := range risksByCategory { + for _, risk := range categoryRisks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Operations { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +type progressReporter interface { + Info(a ...any) + Warn(a ...any) + Error(a ...any) +} diff --git a/pkg/security/types/protocol.go b/pkg/security/types/protocol.go new file mode 100644 index 00000000..532d26bc --- /dev/null +++ b/pkg/security/types/protocol.go @@ -0,0 +1,255 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Protocol int + +const ( + UnknownProtocol Protocol = iota + HTTP + HTTPS + WS + WSS + ReverseProxyWebProtocol + 
ReverseProxyWebProtocolEncrypted + MQTT + JDBC + JdbcEncrypted + ODBC + OdbcEncrypted + SqlAccessProtocol + SqlAccessProtocolEncrypted + NosqlAccessProtocol + NosqlAccessProtocolEncrypted + BINARY + BinaryEncrypted + TEXT + TextEncrypted + SSH + SshTunnel + SMTP + SmtpEncrypted + POP3 + Pop3Encrypted + IMAP + ImapEncrypted + FTP + FTPS + SFTP + SCP + LDAP + LDAPS + JMS + NFS + SMB + SmbEncrypted + LocalFileAccess + NRPE + XMPP + IIOP + IiopEncrypted + JRMP + JrmpEncrypted + InProcessLibraryCall + ContainerSpawning +) + +func ProtocolValues() []TypeEnum { + return []TypeEnum{ + UnknownProtocol, + HTTP, + HTTPS, + WS, + WSS, + ReverseProxyWebProtocol, + ReverseProxyWebProtocolEncrypted, + MQTT, + JDBC, + JdbcEncrypted, + ODBC, + OdbcEncrypted, + SqlAccessProtocol, + SqlAccessProtocolEncrypted, + NosqlAccessProtocol, + NosqlAccessProtocolEncrypted, + BINARY, + BinaryEncrypted, + TEXT, + TextEncrypted, + SSH, + SshTunnel, + SMTP, + SmtpEncrypted, + POP3, + Pop3Encrypted, + IMAP, + ImapEncrypted, + FTP, + FTPS, + SFTP, + SCP, + LDAP, + LDAPS, + JMS, + NFS, + SMB, + SmbEncrypted, + LocalFileAccess, + NRPE, + XMPP, + IIOP, + IiopEncrypted, + JRMP, + JrmpEncrypted, + InProcessLibraryCall, + ContainerSpawning, + } +} + +var ProtocolTypeDescription = [...]TypeDescription{ + {"unknown-protocol", "Unknown protocol"}, + {"http", "HTTP protocol"}, + {"https", "HTTPS protocol (encrypted)"}, + {"ws", "WebSocket"}, + {"wss", "WebSocket but encrypted"}, + {"reverse-proxy-web-protocol", "Protocols used by reverse proxies"}, + {"reverse-proxy-web-protocol-encrypted", "Protocols used by reverse proxies but encrypted"}, + {"mqtt", "MQTT Message protocol. 
Encryption via TLS is optional"}, + {"jdbc", "Java Database Connectivity"}, + {"jdbc-encrypted", "Java Database Connectivity but encrypted"}, + {"odbc", "Open Database Connectivity"}, + {"odbc-encrypted", "Open Database Connectivity but encrypted"}, + {"sql-access-protocol", "SQL access protocol"}, + {"sql-access-protocol-encrypted", "SQL access protocol but encrypted"}, + {"nosql-access-protocol", "NOSQL access protocol"}, + {"nosql-access-protocol-encrypted", "NOSQL access protocol but encrypted"}, + {"binary", "Some other binary protocol"}, + {"binary-encrypted", "Some other binary protocol, encrypted"}, + {"text", "Some other text protocol"}, + {"text-encrypted", "Some other text protocol, encrypted"}, + {"ssh", "Secure Shell to execute commands"}, + {"ssh-tunnel", "Secure Shell as a tunnel"}, + {"smtp", "Mail transfer protocol (sending)"}, + {"smtp-encrypted", "Mail transfer protocol (sending), encrypted"}, + {"pop3", "POP 3 mail fetching"}, + {"pop3-encrypted", "POP 3 mail fetching, encrypted"}, + {"imap", "IMAP mail sync protocol"}, + {"imap-encrypted", "IMAP mail sync protocol, encrypted"}, + {"ftp", "File Transfer Protocol"}, + {"ftps", "FTP with TLS"}, + {"sftp", "FTP on SSH"}, + {"scp", "Secure Shell to copy files"}, + {"ldap", "Lightweight Directory Access Protocol - User directories"}, + {"ldaps", "Lightweight Directory Access Protocol - User directories on TLS"}, + {"jms", "Jakarta Messaging"}, + {"nfs", "Network File System"}, + {"smb", "Server Message Block"}, + {"smb-encrypted", "Server Message Block, but encrypted"}, + {"local-file-access", "Data files are on the local system"}, + {"nrpe", "Nagios Remote Plugin Executor"}, + {"xmpp", "Extensible Messaging and Presence Protocol"}, + {"iiop", "Internet Inter-ORB Protocol"}, + {"iiop-encrypted", "Internet Inter-ORB Protocol, encrypted"}, + {"jrmp", "Java Remote Method Protocol"}, + {"jrmp-encrypted", "Java Remote Method Protocol, encrypted"}, + {"in-process-library-call", "Call to local library"}, 
+ {"container-spawning", "Spawn a container"}, +} + +func ParseProtocol(value string) (protocol Protocol, err error) { + value = strings.TrimSpace(value) + for _, candidate := range ProtocolValues() { + if candidate.String() == value { + return candidate.(Protocol), err + } + } + return protocol, errors.New("Unable to parse into type: " + value) +} + +func (what Protocol) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return ProtocolTypeDescription[what].Name +} + +func (what Protocol) Explain() string { + return ProtocolTypeDescription[what].Description +} + +func (what Protocol) IsProcessLocal() bool { + return what == InProcessLibraryCall || what == LocalFileAccess || what == ContainerSpawning +} + +func (what Protocol) IsEncrypted() bool { + return what == HTTPS || what == WSS || what == JdbcEncrypted || what == OdbcEncrypted || + what == NosqlAccessProtocolEncrypted || what == SqlAccessProtocolEncrypted || what == BinaryEncrypted || what == TextEncrypted || what == SSH || what == SshTunnel || + what == FTPS || what == SFTP || what == SCP || what == LDAPS || what == ReverseProxyWebProtocolEncrypted || + what == IiopEncrypted || what == JrmpEncrypted || what == SmbEncrypted || what == SmtpEncrypted || what == Pop3Encrypted || what == ImapEncrypted +} + +func (what Protocol) IsPotentialDatabaseAccessProtocol(includingLaxDatabaseProtocols bool) bool { + strictlyDatabaseOnlyProtocol := what == JdbcEncrypted || what == OdbcEncrypted || + what == NosqlAccessProtocolEncrypted || what == SqlAccessProtocolEncrypted || what == JDBC || what == ODBC || what == NosqlAccessProtocol || what == SqlAccessProtocol + if includingLaxDatabaseProtocols { + // include HTTP for REST-based NoSQL-DBs as well as unknown binary + return strictlyDatabaseOnlyProtocol || what == HTTPS || what == HTTP || what == BINARY || what == BinaryEncrypted + } + return strictlyDatabaseOnlyProtocol +} + +func (what Protocol) IsPotentialWebAccessProtocol() bool { + 
return what == HTTP || what == HTTPS || what == WS || what == WSS || what == ReverseProxyWebProtocol || what == ReverseProxyWebProtocolEncrypted +} + +func (what Protocol) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Protocol) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Protocol) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Protocol) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Protocol) find(value string) (Protocol, error) { + for index, description := range ProtocolTypeDescription { + if strings.EqualFold(value, description.Name) { + return Protocol(index), nil + } + } + + return Protocol(0), fmt.Errorf("unknown protocol value %q", value) +} diff --git a/pkg/security/types/protocol_test.go b/pkg/security/types/protocol_test.go new file mode 100644 index 00000000..cd9cefd4 --- /dev/null +++ b/pkg/security/types/protocol_test.go @@ -0,0 +1,224 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseProtocolTest struct { + input string + expected Protocol + expectedError error +} + +func TestParseProtocol(t *testing.T) { + testCases := map[string]ParseProtocolTest{ + "unknown-protocol": { + input: "unknown-protocol", + expected: UnknownProtocol, + }, + "http": { + input: "http", + expected: HTTP, + }, + "https": { + input: "https", + expected: HTTPS, + }, + "ws": { + input: "ws", + expected: WS, + }, + "wss": { + input: "wss", + expected: WSS, + }, + "reverse-proxy-web-protocol": { + input: "reverse-proxy-web-protocol", + 
expected: ReverseProxyWebProtocol, + }, + "reverse-proxy-web-protocol-encrypted": { + input: "reverse-proxy-web-protocol-encrypted", + expected: ReverseProxyWebProtocolEncrypted, + }, + "mqtt": { + input: "mqtt", + expected: MQTT, + }, + "jdbc": { + input: "jdbc", + expected: JDBC, + }, + "jdbc-encrypted": { + input: "jdbc-encrypted", + expected: JdbcEncrypted, + }, + "odbc": { + input: "odbc", + expected: ODBC, + }, + "odbc-encrypted": { + input: "odbc-encrypted", + expected: OdbcEncrypted, + }, + "sql-access-protocol": { + input: "sql-access-protocol", + expected: SqlAccessProtocol, + }, + "sql-access-protocol-encrypted": { + input: "sql-access-protocol-encrypted", + expected: SqlAccessProtocolEncrypted, + }, + "nosql-access-protocol": { + input: "nosql-access-protocol", + expected: NosqlAccessProtocol, + }, + "nosql-access-protocol-encrypted": { + input: "nosql-access-protocol-encrypted", + expected: NosqlAccessProtocolEncrypted, + }, + "binary": { + input: "binary", + expected: BINARY, + }, + "binary-encrypted": { + input: "binary-encrypted", + expected: BinaryEncrypted, + }, + "text": { + input: "text", + expected: TEXT, + }, + "text-encrypted": { + input: "text-encrypted", + expected: TextEncrypted, + }, + "ssh": { + input: "ssh", + expected: SSH, + }, + "ssh-tunnel": { + input: "ssh-tunnel", + expected: SshTunnel, + }, + "smtp": { + input: "smtp", + expected: SMTP, + }, + "smtp-encrypted": { + input: "smtp-encrypted", + expected: SmtpEncrypted, + }, + "pop3": { + input: "pop3", + expected: POP3, + }, + "pop3-encrypted": { + input: "pop3-encrypted", + expected: Pop3Encrypted, + }, + "imap": { + input: "imap", + expected: IMAP, + }, + "imap-encrypted": { + input: "imap-encrypted", + expected: ImapEncrypted, + }, + "ftp": { + input: "ftp", + expected: FTP, + }, + "ftps": { + input: "ftps", + expected: FTPS, + }, + "sftp": { + input: "sftp", + expected: SFTP, + }, + "scp": { + input: "scp", + expected: SCP, + }, + "ldap": { + input: "ldap", + expected: LDAP, + 
}, + "ldaps": { + input: "ldaps", + expected: LDAPS, + }, + "jms": { + input: "jms", + expected: JMS, + }, + "nfs": { + input: "nfs", + expected: NFS, + }, + "smb": { + input: "smb", + expected: SMB, + }, + "smb-encrypted": { + input: "smb-encrypted", + expected: SmbEncrypted, + }, + "local-file-access": { + input: "local-file-access", + expected: LocalFileAccess, + }, + "nrpe": { + input: "nrpe", + expected: NRPE, + }, + "xmpp": { + input: "xmpp", + expected: XMPP, + }, + "iiop": { + input: "iiop", + expected: IIOP, + }, + "iiop-encrypted": { + input: "iiop-encrypted", + expected: IiopEncrypted, + }, + "jrmp": { + input: "jrmp", + expected: JRMP, + }, + "jrmp-encrypted": { + input: "jrmp-encrypted", + expected: JrmpEncrypted, + }, + "in-process-library-call": { + input: "in-process-library-call", + expected: InProcessLibraryCall, + }, + "container-spawning": { + input: "container-spawning", + expected: ContainerSpawning, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseProtocol(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/quantity.go b/pkg/security/types/quantity.go new file mode 100644 index 00000000..f4942267 --- /dev/null +++ b/pkg/security/types/quantity.go @@ -0,0 +1,110 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Quantity int + +const ( + VeryFew Quantity = iota + Few + Many + VeryMany +) + +func QuantityValues() []TypeEnum { + return []TypeEnum{ + VeryFew, + Few, + Many, + VeryMany, + } +} + +func ParseQuantity(value string) (quantity Quantity, err error) { + value = strings.TrimSpace(value) + for _, candidate := range QuantityValues() { + if candidate.String() == value { + return 
candidate.(Quantity), err + } + } + return quantity, errors.New("Unable to parse into type: " + value) +} + +var QuantityTypeDescription = [...]TypeDescription{ + {"very-few", "Very few"}, + {"few", "Few"}, + {"many", "Many"}, + {"very-many", "Very many"}, +} + +func (what Quantity) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return QuantityTypeDescription[what].Name +} + +func (what Quantity) Explain() string { + return QuantityTypeDescription[what].Description +} + +func (what Quantity) Title() string { + return [...]string{"very few", "few", "many", "very many"}[what] +} + +func (what Quantity) QuantityFactor() float64 { + // fibonacci starting at 1 + return [...]float64{1, 2, 3, 5}[what] +} + +func (what Quantity) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Quantity) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Quantity) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Quantity) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Quantity) find(value string) (Quantity, error) { + for index, description := range QuantityTypeDescription { + if strings.EqualFold(value, description.Name) { + return Quantity(index), nil + } + } + + return Quantity(0), fmt.Errorf("unknown quantity value %q", value) +} diff --git a/pkg/security/types/quantity_test.go b/pkg/security/types/quantity_test.go new file mode 100644 index 00000000..1ddfb52c --- /dev/null +++ b/pkg/security/types/quantity_test.go @@ -0,0 +1,52 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + 
"testing" + + "github.com/stretchr/testify/assert" +) + +type ParseQuantityTest struct { + input string + expected Quantity + expectedError error +} + +func TestParseQuantity(t *testing.T) { + testCases := map[string]ParseQuantityTest{ + "very-few": { + input: "very-few", + expected: VeryFew, + }, + "few": { + input: "few", + expected: Few, + }, + "many": { + input: "many", + expected: Many, + }, + "very-many": { + input: "very-many", + expected: VeryMany, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseQuantity(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risk-category.go b/pkg/security/types/risk-category.go new file mode 100644 index 00000000..58a17b2c --- /dev/null +++ b/pkg/security/types/risk-category.go @@ -0,0 +1,21 @@ +package types + +type RiskCategory struct { + // TODO: refactor all "Id" here and elsewhere to "ID" + Id string `json:"id,omitempty" yaml:"id,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Impact string `json:"impact,omitempty" yaml:"impact,omitempty"` + ASVS string `json:"asvs,omitempty" yaml:"asvs,omitempty"` + CheatSheet string `json:"cheat_sheet,omitempty" yaml:"cheat_sheet,omitempty"` + Action string `json:"action,omitempty" yaml:"action,omitempty"` + Mitigation string `json:"mitigation,omitempty" yaml:"mitigation,omitempty"` + Check string `json:"check,omitempty" yaml:"check,omitempty"` + DetectionLogic string `json:"detection_logic,omitempty" yaml:"detection_logic,omitempty"` + RiskAssessment string `json:"risk_assessment,omitempty" yaml:"risk_assessment,omitempty"` + FalsePositives string `json:"false_positives,omitempty" 
yaml:"false_positives,omitempty"` + Function RiskFunction `json:"function,omitempty" yaml:"function,omitempty"` + STRIDE STRIDE `json:"stride,omitempty" yaml:"stride,omitempty"` + ModelFailurePossibleReason bool `json:"model_failure_possible_reason,omitempty" yaml:"model_failure_possible_reason,omitempty"` + CWE int `json:"cwe,omitempty" yaml:"cwe,omitempty"` +} diff --git a/pkg/security/types/risk-tracking.go b/pkg/security/types/risk-tracking.go new file mode 100644 index 00000000..fc464226 --- /dev/null +++ b/pkg/security/types/risk-tracking.go @@ -0,0 +1,10 @@ +package types + +type RiskTracking struct { + SyntheticRiskId string `json:"synthetic_risk_id,omitempty" yaml:"synthetic_risk_id,omitempty"` + Justification string `json:"justification,omitempty" yaml:"justification,omitempty"` + Ticket string `json:"ticket,omitempty" yaml:"ticket,omitempty"` + CheckedBy string `json:"checked_by,omitempty" yaml:"checked_by,omitempty"` + Status RiskStatus `json:"status,omitempty" yaml:"status,omitempty"` + Date Date `json:"date,omitempty" yaml:"date,omitempty"` +} diff --git a/pkg/security/types/risk.go b/pkg/security/types/risk.go new file mode 100644 index 00000000..c98863ed --- /dev/null +++ b/pkg/security/types/risk.go @@ -0,0 +1,41 @@ +package types + +type Risk struct { + CategoryId string `yaml:"category,omitempty" json:"category,omitempty"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically + RiskStatus RiskStatus `yaml:"risk_status,omitempty" json:"risk_status,omitempty"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically + Severity RiskSeverity `yaml:"severity,omitempty" json:"severity,omitempty"` + ExploitationLikelihood RiskExploitationLikelihood `yaml:"exploitation_likelihood,omitempty" json:"exploitation_likelihood,omitempty"` + ExploitationImpact RiskExploitationImpact `yaml:"exploitation_impact,omitempty" json:"exploitation_impact,omitempty"` + Title string `yaml:"title,omitempty" 
json:"title,omitempty"` + SyntheticId string `yaml:"synthetic_id,omitempty" json:"synthetic_id,omitempty"` + MostRelevantDataAssetId string `yaml:"most_relevant_data_asset,omitempty" json:"most_relevant_data_asset,omitempty"` + MostRelevantTechnicalAssetId string `yaml:"most_relevant_technical_asset,omitempty" json:"most_relevant_technical_asset,omitempty"` + MostRelevantTrustBoundaryId string `yaml:"most_relevant_trust_boundary,omitempty" json:"most_relevant_trust_boundary,omitempty"` + MostRelevantSharedRuntimeId string `yaml:"most_relevant_shared_runtime,omitempty" json:"most_relevant_shared_runtime,omitempty"` + MostRelevantCommunicationLinkId string `yaml:"most_relevant_communication_link,omitempty" json:"most_relevant_communication_link,omitempty"` + DataBreachProbability DataBreachProbability `yaml:"data_breach_probability,omitempty" json:"data_breach_probability,omitempty"` + DataBreachTechnicalAssetIDs []string `yaml:"data_breach_technical_assets,omitempty" json:"data_breach_technical_assets,omitempty"` + // TODO: refactor all "Id" here to "ID"? +} + +func (what Risk) GetRiskTracking(model *ParsedModel) RiskTracking { // TODO: Unify function naming regarding Get etc. 
+ var result RiskTracking + if riskTracking, ok := model.RiskTracking[what.SyntheticId]; ok { + result = riskTracking + } + return result +} + +func (what Risk) GetRiskTrackingStatusDefaultingUnchecked(model *ParsedModel) RiskStatus { + if riskTracking, ok := model.RiskTracking[what.SyntheticId]; ok { + return riskTracking.Status + } + return Unchecked +} + +func (what Risk) IsRiskTracked(model *ParsedModel) bool { + if _, ok := model.RiskTracking[what.SyntheticId]; ok { + return true + } + return false +} diff --git a/pkg/security/types/risk_exploitation_impact.go b/pkg/security/types/risk_exploitation_impact.go new file mode 100644 index 00000000..3fb7f6c3 --- /dev/null +++ b/pkg/security/types/risk_exploitation_impact.go @@ -0,0 +1,112 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type RiskExploitationImpact int + +const ( + LowImpact RiskExploitationImpact = iota + MediumImpact + HighImpact + VeryHighImpact +) + +func RiskExploitationImpactValues() []TypeEnum { + return []TypeEnum{ + LowImpact, + MediumImpact, + HighImpact, + VeryHighImpact, + } +} + +var RiskExploitationImpactTypeDescription = [...]TypeDescription{ + {"low", "Low"}, + {"medium", "Medium"}, + {"high", "High"}, + {"very-high", "Very High"}, +} + +func ParseRiskExploitationImpact(value string) (riskExploitationImpact RiskExploitationImpact, err error) { + value = strings.TrimSpace(value) + if value == "" { + return MediumImpact, nil + } + for _, candidate := range RiskExploitationImpactValues() { + if candidate.String() == value { + return candidate.(RiskExploitationImpact), err + } + } + return riskExploitationImpact, errors.New("Unable to parse into type: " + value) +} + +func (what RiskExploitationImpact) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return RiskExploitationImpactTypeDescription[what].Name +} + +func (what RiskExploitationImpact) Explain() 
string { + return RiskExploitationImpactTypeDescription[what].Description +} + +func (what RiskExploitationImpact) Title() string { + return [...]string{"Low", "Medium", "High", "Very High"}[what] +} + +func (what RiskExploitationImpact) Weight() int { + return [...]int{1, 2, 3, 4}[what] +} + +func (what RiskExploitationImpact) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *RiskExploitationImpact) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskExploitationImpact) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *RiskExploitationImpact) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskExploitationImpact) find(value string) (RiskExploitationImpact, error) { + for index, description := range RiskExploitationImpactTypeDescription { + if strings.EqualFold(value, description.Name) { + return RiskExploitationImpact(index), nil + } + } + + return RiskExploitationImpact(0), fmt.Errorf("unknown risk exploitation impact value %q", value) +} diff --git a/pkg/security/types/risk_exploitation_impact_test.go b/pkg/security/types/risk_exploitation_impact_test.go new file mode 100644 index 00000000..aa4a03cb --- /dev/null +++ b/pkg/security/types/risk_exploitation_impact_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseRiskExploitationImpactTest struct { + input string + expected RiskExploitationImpact + expectedError error +} + +func TestParseRiskExploitationImpact(t *testing.T) { + testCases := 
map[string]ParseRiskExploitationImpactTest{ + "low": { + input: "low", + expected: LowImpact, + }, + "medium": { + input: "medium", + expected: MediumImpact, + }, + "high": { + input: "high", + expected: HighImpact, + }, + "very-high": { + input: "very-high", + expected: VeryHighImpact, + }, + "default": { + input: "", + expected: MediumImpact, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseRiskExploitationImpact(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risk_exploitation_likelihood.go b/pkg/security/types/risk_exploitation_likelihood.go new file mode 100644 index 00000000..d9db35b9 --- /dev/null +++ b/pkg/security/types/risk_exploitation_likelihood.go @@ -0,0 +1,112 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type RiskExploitationLikelihood int + +const ( + Unlikely RiskExploitationLikelihood = iota + Likely + VeryLikely + Frequent +) + +func RiskExploitationLikelihoodValues() []TypeEnum { + return []TypeEnum{ + Unlikely, + Likely, + VeryLikely, + Frequent, + } +} + +var RiskExploitationLikelihoodTypeDescription = [...]TypeDescription{ + {"unlikely", "Unlikely"}, + {"likely", "Likely"}, + {"very-likely", "Very-Likely"}, + {"frequent", "Frequent"}, +} + +func ParseRiskExploitationLikelihood(value string) (riskExploitationLikelihood RiskExploitationLikelihood, err error) { + value = strings.TrimSpace(value) + if value == "" { + return Likely, nil + } + for _, candidate := range RiskExploitationLikelihoodValues() { + if candidate.String() == value { + return candidate.(RiskExploitationLikelihood), err + } + } + return riskExploitationLikelihood, errors.New("Unable to parse into type: " + 
value) +} + +func (what RiskExploitationLikelihood) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return RiskExploitationLikelihoodTypeDescription[what].Name +} + +func (what RiskExploitationLikelihood) Explain() string { + return RiskExploitationLikelihoodTypeDescription[what].Description +} + +func (what RiskExploitationLikelihood) Title() string { + return [...]string{"Unlikely", "Likely", "Very Likely", "Frequent"}[what] +} + +func (what RiskExploitationLikelihood) Weight() int { + return [...]int{1, 2, 3, 4}[what] +} + +func (what RiskExploitationLikelihood) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *RiskExploitationLikelihood) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskExploitationLikelihood) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *RiskExploitationLikelihood) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskExploitationLikelihood) find(value string) (RiskExploitationLikelihood, error) { + for index, description := range RiskExploitationLikelihoodTypeDescription { + if strings.EqualFold(value, description.Name) { + return RiskExploitationLikelihood(index), nil + } + } + + return RiskExploitationLikelihood(0), fmt.Errorf("unknown risk exploration likelihood value %q", value) +} diff --git a/pkg/security/types/risk_exploitation_likelihood_test.go b/pkg/security/types/risk_exploitation_likelihood_test.go new file mode 100644 index 00000000..0c5f060e --- /dev/null +++ b/pkg/security/types/risk_exploitation_likelihood_test.go @@ -0,0 +1,56 @@ +/* +Copyright Š 
2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseRiskExploitationLikelihoodTest struct { + input string + expected RiskExploitationLikelihood + expectedError error +} + +func TestParseRiskExploitationLikelihood(t *testing.T) { + testCases := map[string]ParseRiskExploitationLikelihoodTest{ + "unlikely": { + input: "unlikely", + expected: Unlikely, + }, + "likely": { + input: "likely", + expected: Likely, + }, + "very-likely": { + input: "very-likely", + expected: VeryLikely, + }, + "frequent": { + input: "frequent", + expected: Frequent, + }, + "default": { + input: "", + expected: Likely, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseRiskExploitationLikelihood(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risk_function.go b/pkg/security/types/risk_function.go new file mode 100644 index 00000000..8a843591 --- /dev/null +++ b/pkg/security/types/risk_function.go @@ -0,0 +1,105 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type RiskFunction int + +const ( + BusinessSide RiskFunction = iota + Architecture + Development + Operations +) + +func RiskFunctionValues() []TypeEnum { + return []TypeEnum{ + BusinessSide, + Architecture, + Development, + Operations, + } +} + +var RiskFunctionTypeDescription = [...]TypeDescription{ + {"business-side", "Business"}, + {"architecture", "Architecture"}, + {"development", "Development"}, + {"operations", "Operations"}, +} + +func ParseRiskFunction(value string) (riskFunction RiskFunction, err error) { + value = strings.TrimSpace(value) + for _, candidate := range RiskFunctionValues() { + 
if candidate.String() == value { + return candidate.(RiskFunction), err + } + } + return riskFunction, errors.New("Unable to parse into type: " + value) +} + +func (what RiskFunction) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return RiskFunctionTypeDescription[what].Name +} + +func (what RiskFunction) Explain() string { + return RiskFunctionTypeDescription[what].Description +} + +func (what RiskFunction) Title() string { + return [...]string{"Business Side", "Architecture", "Development", "Operations"}[what] +} + +func (what RiskFunction) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *RiskFunction) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskFunction) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *RiskFunction) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskFunction) find(value string) (RiskFunction, error) { + for index, description := range RiskFunctionTypeDescription { + if strings.EqualFold(value, description.Name) { + return RiskFunction(index), nil + } + } + + return RiskFunction(0), fmt.Errorf("unknown risk function value %q", value) +} diff --git a/pkg/security/types/risk_function_test.go b/pkg/security/types/risk_function_test.go new file mode 100644 index 00000000..68703188 --- /dev/null +++ b/pkg/security/types/risk_function_test.go @@ -0,0 +1,52 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseRiskFunctionTest struct { + input string + expected 
RiskFunction + expectedError error +} + +func TestParseRiskFunction(t *testing.T) { + testCases := map[string]ParseRiskFunctionTest{ + "business-side": { + input: "business-side", + expected: BusinessSide, + }, + "architecture": { + input: "architecture", + expected: Architecture, + }, + "development": { + input: "development", + expected: Development, + }, + "operations": { + input: "operations", + expected: Operations, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseRiskFunction(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risk_severity.go b/pkg/security/types/risk_severity.go new file mode 100644 index 00000000..41a665b8 --- /dev/null +++ b/pkg/security/types/risk_severity.go @@ -0,0 +1,111 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type RiskSeverity int + +const ( + LowSeverity RiskSeverity = iota + MediumSeverity + ElevatedSeverity + HighSeverity + CriticalSeverity +) + +func RiskSeverityValues() []TypeEnum { + return []TypeEnum{ + LowSeverity, + MediumSeverity, + ElevatedSeverity, + HighSeverity, + CriticalSeverity, + } +} + +var RiskSeverityTypeDescription = [...]TypeDescription{ + {"low", "Low"}, + {"medium", "Medium"}, + {"elevated", "Elevated"}, + {"high", "High"}, + {"critical", "Critical"}, +} + +func ParseRiskSeverity(value string) (riskSeverity RiskSeverity, err error) { + value = strings.TrimSpace(value) + if value == "" { + return MediumSeverity, nil + } + for _, candidate := range RiskSeverityValues() { + if candidate.String() == value { + return candidate.(RiskSeverity), err + } + } + return riskSeverity, errors.New("Unable to parse into type: " + value) +} + +func (what 
RiskSeverity) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return RiskSeverityTypeDescription[what].Name +} + +func (what RiskSeverity) Explain() string { + return RiskSeverityTypeDescription[what].Description +} + +func (what RiskSeverity) Title() string { + return [...]string{"Low", "Medium", "Elevated", "High", "Critical"}[what] +} + +func (what RiskSeverity) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *RiskSeverity) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskSeverity) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *RiskSeverity) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskSeverity) find(value string) (RiskSeverity, error) { + for index, description := range RiskSeverityTypeDescription { + if strings.EqualFold(value, description.Name) { + return RiskSeverity(index), nil + } + } + + return RiskSeverity(0), fmt.Errorf("unknown risk severity value %q", value) +} diff --git a/pkg/security/types/risk_severity_test.go b/pkg/security/types/risk_severity_test.go new file mode 100644 index 00000000..4a987eb8 --- /dev/null +++ b/pkg/security/types/risk_severity_test.go @@ -0,0 +1,60 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseRiskSeverityTest struct { + input string + expected RiskSeverity + expectedError error +} + +func TestParseRiskSeverity(t *testing.T) { + testCases := map[string]ParseRiskSeverityTest{ + "low": { + input: "low", + expected: LowSeverity, + }, + 
"medium": { + input: "medium", + expected: MediumSeverity, + }, + "elevated": { + input: "elevated", + expected: ElevatedSeverity, + }, + "high": { + input: "high", + expected: HighSeverity, + }, + "critical": { + input: "critical", + expected: CriticalSeverity, + }, + "default": { + input: "", + expected: MediumSeverity, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseRiskSeverity(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risk_status.go b/pkg/security/types/risk_status.go new file mode 100644 index 00000000..1f024219 --- /dev/null +++ b/pkg/security/types/risk_status.go @@ -0,0 +1,115 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type RiskStatus int + +const ( + Unchecked RiskStatus = iota + InDiscussion + Accepted + InProgress + Mitigated + FalsePositive +) + +func RiskStatusValues() []TypeEnum { + return []TypeEnum{ + Unchecked, + InDiscussion, + Accepted, + InProgress, + Mitigated, + FalsePositive, + } +} + +var RiskStatusTypeDescription = [...]TypeDescription{ + {"unchecked", "Risk has not yet been reviewed"}, + {"in-discussion", "Risk is currently being discussed (during review)"}, + {"accepted", "Risk has been accepted (as possibly a corporate risk acceptance process defines)"}, + {"in-progress", "Risk mitigation is currently in progress"}, + {"mitigated", "Risk has been mitigated"}, + {"false-positive", "Risk is a false positive (i.e. 
no risk at all or not applicable)"}, +} + +func ParseRiskStatus(value string) (riskStatus RiskStatus, err error) { + value = strings.TrimSpace(value) + for _, candidate := range RiskStatusValues() { + if candidate.String() == value { + return candidate.(RiskStatus), err + } + } + return riskStatus, errors.New("Unable to parse into type: " + value) +} + +func (what RiskStatus) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return RiskStatusTypeDescription[what].Name +} + +func (what RiskStatus) Explain() string { + return RiskStatusTypeDescription[what].Description +} + +func (what RiskStatus) Title() string { + return [...]string{"Unchecked", "in Discussion", "Accepted", "in Progress", "Mitigated", "False Positive"}[what] +} + +func (what RiskStatus) IsStillAtRisk() bool { + return what == Unchecked || what == InDiscussion || what == Accepted || what == InProgress +} + +func (what RiskStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *RiskStatus) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskStatus) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *RiskStatus) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what RiskStatus) find(value string) (RiskStatus, error) { + for index, description := range RiskStatusTypeDescription { + if strings.EqualFold(value, description.Name) { + return RiskStatus(index), nil + } + } + + return RiskStatus(0), fmt.Errorf("unknown risk status value %q", value) +} diff --git a/pkg/security/types/risk_status_test.go 
b/pkg/security/types/risk_status_test.go new file mode 100644 index 00000000..989ef3f3 --- /dev/null +++ b/pkg/security/types/risk_status_test.go @@ -0,0 +1,60 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseRiskStatusTest struct { + input string + expected RiskStatus + expectedError error +} + +func TestParseRiskStatus(t *testing.T) { + testCases := map[string]ParseRiskStatusTest{ + "unchecked": { + input: "unchecked", + expected: Unchecked, + }, + "in-discussion": { + input: "in-discussion", + expected: InDiscussion, + }, + "accepted": { + input: "accepted", + expected: Accepted, + }, + "in-progress": { + input: "in-progress", + expected: InProgress, + }, + "mitigated": { + input: "mitigated", + expected: Mitigated, + }, + "false-positive": { + input: "false-positive", + expected: FalsePositive, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseRiskStatus(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/risks.go b/pkg/security/types/risks.go new file mode 100644 index 00000000..72bb3569 --- /dev/null +++ b/pkg/security/types/risks.go @@ -0,0 +1,828 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "sort" +) + +func GetRiskCategory(parsedModel *ParsedModel, categoryID string) *RiskCategory { + if len(parsedModel.IndividualRiskCategories) > 0 { + custom, customOk := parsedModel.IndividualRiskCategories[categoryID] + if customOk { + return &custom + } + } + + if len(parsedModel.BuiltInRiskCategories) > 0 { + builtIn, builtInOk := parsedModel.BuiltInRiskCategories[categoryID] + if builtInOk { + return &builtIn + } + } + + return nil +} + +func GetRiskCategories(parsedModel 
*ParsedModel, categoryIDs []string) []RiskCategory { + categoryMap := make(map[string]RiskCategory) + for _, categoryId := range categoryIDs { + category := GetRiskCategory(parsedModel, categoryId) + if category != nil { + categoryMap[categoryId] = *category + } + } + + categories := make([]RiskCategory, 0) + for categoryId := range categoryMap { + categories = append(categories, categoryMap[categoryId]) + } + + return categories +} + +func AllRisks(parsedModel *ParsedModel) []Risk { + result := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + result = append(result, risk) + } + } + return result +} + +func ReduceToOnlyStillAtRisk(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func HighestExploitationLikelihood(risks []Risk) RiskExploitationLikelihood { + result := Unlikely + for _, risk := range risks { + if risk.ExploitationLikelihood > result { + result = risk.ExploitationLikelihood + } + } + return result +} + +func HighestExploitationImpact(risks []Risk) RiskExploitationImpact { + result := LowImpact + for _, risk := range risks { + if risk.ExploitationImpact > result { + result = risk.ExploitationImpact + } + } + return result +} + +func HighestSeverityStillAtRisk(model *ParsedModel, risks []Risk) RiskSeverity { + result := LowSeverity + for _, risk := range risks { + if risk.Severity > result && risk.GetRiskTrackingStatusDefaultingUnchecked(model).IsStillAtRisk() { + result = risk.Severity + } + } + return result +} + +type ByRiskCategoryTitleSort []RiskCategory + +func (what ByRiskCategoryTitleSort) Len() int { return len(what) } +func (what ByRiskCategoryTitleSort) Swap(i, j int) { + what[i], what[j] = what[j], what[i] +} +func (what ByRiskCategoryTitleSort) Less(i, 
j int) bool { + return what[i].Title < what[j].Title +} + +func SortByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk(parsedModel *ParsedModel, riskCategories []RiskCategory) { + sort.Slice(riskCategories, func(i, j int) bool { + risksLeft := ReduceToOnlyStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[riskCategories[i].Id]) + risksRight := ReduceToOnlyStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[riskCategories[j].Id]) + highestLeft := HighestSeverityStillAtRisk(parsedModel, risksLeft) + highestRight := HighestSeverityStillAtRisk(parsedModel, risksRight) + if highestLeft == highestRight { + if len(risksLeft) == 0 && len(risksRight) > 0 { + return false + } + if len(risksLeft) > 0 && len(risksRight) == 0 { + return true + } + return riskCategories[i].Title < riskCategories[j].Title + } + return highestLeft > highestRight + }) +} + +type RiskStatistics struct { + // TODO add also some more like before / after (i.e. with mitigation applied) + Risks map[string]map[string]int `yaml:"risks" json:"risks"` +} + +func SortByRiskSeverity(risks []Risk, parsedModel *ParsedModel) { + sort.Slice(risks, func(i, j int) bool { + if risks[i].Severity == risks[j].Severity { + trackingStatusLeft := risks[i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel) + trackingStatusRight := risks[j].GetRiskTrackingStatusDefaultingUnchecked(parsedModel) + if trackingStatusLeft == trackingStatusRight { + impactLeft := risks[i].ExploitationImpact + impactRight := risks[j].ExploitationImpact + if impactLeft == impactRight { + likelihoodLeft := risks[i].ExploitationLikelihood + likelihoodRight := risks[j].ExploitationLikelihood + if likelihoodLeft == likelihoodRight { + return risks[i].Title < risks[j].Title + } else { + return likelihoodLeft > likelihoodRight + } + } else { + return impactLeft > impactRight + } + } else { + return trackingStatusLeft < trackingStatusRight + } + } + return risks[i].Severity > risks[j].Severity + + }) +} + +func 
SortByDataBreachProbability(risks []Risk, parsedModel *ParsedModel) { + sort.Slice(risks, func(i, j int) bool { + + if risks[i].DataBreachProbability == risks[j].DataBreachProbability { + trackingStatusLeft := risks[i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel) + trackingStatusRight := risks[j].GetRiskTrackingStatusDefaultingUnchecked(parsedModel) + if trackingStatusLeft == trackingStatusRight { + return risks[i].Title < risks[j].Title + } else { + return trackingStatusLeft < trackingStatusRight + } + } + return risks[i].DataBreachProbability > risks[j].DataBreachProbability + }) +} + +// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: + +func SortedRiskCategories(parsedModel *ParsedModel) []RiskCategory { + categoryMap := make(map[string]RiskCategory) + for categoryId := range parsedModel.GeneratedRisksByCategory { + category := GetRiskCategory(parsedModel, categoryId) + if category != nil { + categoryMap[categoryId] = *category + } + } + + categories := make([]RiskCategory, 0) + for categoryId := range categoryMap { + categories = append(categories, categoryMap[categoryId]) + } + + SortByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk(parsedModel, categories) + return categories +} + +func SortedRisksOfCategory(parsedModel *ParsedModel, category RiskCategory) []Risk { + risks := parsedModel.GeneratedRisksByCategory[category.Id] + SortByRiskSeverity(risks, parsedModel) + return risks +} + +func CountRisks(risksByCategory map[string][]Risk) int { + result := 0 + for _, risks := range risksByCategory { + result += len(risks) + } + return result +} + +func RisksOfOnlySTRIDESpoofing(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category != nil { + if category.STRIDE == Spoofing { + 
result[categoryId] = append(result[categoryId], risk) + } + } + } + } + return result +} + +func RisksOfOnlySTRIDETampering(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category != nil { + if category.STRIDE == Tampering { + result[categoryId] = append(result[categoryId], risk) + } + } + } + } + return result +} + +func RisksOfOnlySTRIDERepudiation(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == Repudiation { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlySTRIDEInformationDisclosure(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == InformationDisclosure { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlySTRIDEDenialOfService(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == DenialOfService { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlySTRIDEElevationOfPrivilege(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range 
risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.STRIDE == ElevationOfPrivilege { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlyBusinessSide(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == BusinessSide { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlyArchitecture(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Architecture { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlyDevelopment(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Development { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func RisksOfOnlyOperation(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Operations { + result[categoryId] = append(result[categoryId], risk) + } + } + } + return result +} + +func CategoriesOfOnlyRisksStillAtRisk(parsedModel *ParsedModel, risksByCategory map[string][]Risk) []string { 
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + categories[categoryId] = struct{}{} + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func CategoriesOfOnlyCriticalRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string { + categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + if risk.Severity == CriticalSeverity { + categories[categoryId] = struct{}{} + } + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func CategoriesOfOnlyHighRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string { + categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId]) + if !initialRisks { + highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId]) + } + if risk.Severity == HighSeverity && highest < CriticalSeverity { + categories[categoryId] = struct{}{} + } + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func CategoriesOfOnlyElevatedRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string { + categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for 
categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId]) + if !initialRisks { + highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId]) + } + if risk.Severity == ElevatedSeverity && highest < HighSeverity { + categories[categoryId] = struct{}{} + } + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func CategoriesOfOnlyMediumRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string { + categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId]) + if !initialRisks { + highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId]) + } + if risk.Severity == MediumSeverity && highest < ElevatedSeverity { + categories[categoryId] = struct{}{} + } + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func CategoriesOfOnlyLowRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string { + categories := make(map[string]struct{}) // Go's trick of unique elements is a map + for categoryId, risks := range risksByCategory { + for _, risk := range risks { + if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + continue + } + highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId]) + if !initialRisks { + highest = HighestSeverityStillAtRisk(parsedModel, 
parsedModel.GeneratedRisksByCategory[categoryId]) + } + if risk.Severity == LowSeverity && highest < MediumSeverity { + categories[categoryId] = struct{}{} + } + } + } + // return as slice (of now unique values) + return keysAsSlice(categories) +} + +func HighestSeverity(risks []Risk) RiskSeverity { + result := LowSeverity + for _, risk := range risks { + if risk.Severity > result { + result = risk.Severity + } + } + return result +} + +func keysAsSlice(categories map[string]struct{}) []string { + result := make([]string, 0, len(categories)) + for k := range categories { + result = append(result, k) + } + return result +} + +func FilteredByOnlyBusinessSide(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for categoryId, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == BusinessSide { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyArchitecture(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for categoryId, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Architecture { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyDevelopment(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for categoryId, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Development { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyOperation(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for categoryId, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + 
category := GetRiskCategory(parsedModel, categoryId) + if category.Function == Operations { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyCriticalRisks(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.Severity == CriticalSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyHighRisks(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.Severity == HighSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyElevatedRisks(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.Severity == ElevatedSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyMediumRisks(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.Severity == MediumSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByOnlyLowRisks(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.Severity == LowSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilterByModelFailures(parsedModel *ParsedModel, risksByCat map[string][]Risk) map[string][]Risk { + result := make(map[string][]Risk) + for categoryId, risks := range risksByCat { + category := 
GetRiskCategory(parsedModel, categoryId) + if category.ModelFailurePossibleReason { + result[categoryId] = risks + } + } + + return result +} + +func FlattenRiskSlice(risksByCat map[string][]Risk) []Risk { + result := make([]Risk, 0) + for _, risks := range risksByCat { + result = append(result, risks...) + } + return result +} + +func TotalRiskCount(parsedModel *ParsedModel) int { + count := 0 + for _, risks := range parsedModel.GeneratedRisksByCategory { + count += len(risks) + } + return count +} + +func FilteredByRiskTrackingUnchecked(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Unchecked { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByRiskTrackingInDiscussion(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InDiscussion { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByRiskTrackingAccepted(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Accepted { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByRiskTrackingInProgress(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InProgress { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func 
FilteredByRiskTrackingMitigated(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Mitigated { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func FilteredByRiskTrackingFalsePositive(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == FalsePositive { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func ReduceToOnlyHighRisk(risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.Severity == HighSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyMediumRisk(risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.Severity == MediumSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyLowRisk(risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.Severity == LowSeverity { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingUnchecked(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Unchecked { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingInDiscussion(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InDiscussion { 
+ filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingAccepted(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Accepted { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingInProgress(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InProgress { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingMitigated(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Mitigated { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func ReduceToOnlyRiskTrackingFalsePositive(parsedModel *ParsedModel, risks []Risk) []Risk { + filteredRisks := make([]Risk, 0) + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == FalsePositive { + filteredRisks = append(filteredRisks, risk) + } + } + return filteredRisks +} + +func FilteredByStillAtRisk(parsedModel *ParsedModel) []Risk { + filteredRisks := make([]Risk, 0) + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() { + filteredRisks = append(filteredRisks, risk) + } + } + } + return filteredRisks +} + +func OverallRiskStatistics(parsedModel *ParsedModel) RiskStatistics { + result := RiskStatistics{} + result.Risks = make(map[string]map[string]int) + result.Risks[CriticalSeverity.String()] = make(map[string]int) + result.Risks[CriticalSeverity.String()][Unchecked.String()] = 0 + 
result.Risks[CriticalSeverity.String()][InDiscussion.String()] = 0 + result.Risks[CriticalSeverity.String()][Accepted.String()] = 0 + result.Risks[CriticalSeverity.String()][InProgress.String()] = 0 + result.Risks[CriticalSeverity.String()][Mitigated.String()] = 0 + result.Risks[CriticalSeverity.String()][FalsePositive.String()] = 0 + result.Risks[HighSeverity.String()] = make(map[string]int) + result.Risks[HighSeverity.String()][Unchecked.String()] = 0 + result.Risks[HighSeverity.String()][InDiscussion.String()] = 0 + result.Risks[HighSeverity.String()][Accepted.String()] = 0 + result.Risks[HighSeverity.String()][InProgress.String()] = 0 + result.Risks[HighSeverity.String()][Mitigated.String()] = 0 + result.Risks[HighSeverity.String()][FalsePositive.String()] = 0 + result.Risks[ElevatedSeverity.String()] = make(map[string]int) + result.Risks[ElevatedSeverity.String()][Unchecked.String()] = 0 + result.Risks[ElevatedSeverity.String()][InDiscussion.String()] = 0 + result.Risks[ElevatedSeverity.String()][Accepted.String()] = 0 + result.Risks[ElevatedSeverity.String()][InProgress.String()] = 0 + result.Risks[ElevatedSeverity.String()][Mitigated.String()] = 0 + result.Risks[ElevatedSeverity.String()][FalsePositive.String()] = 0 + result.Risks[MediumSeverity.String()] = make(map[string]int) + result.Risks[MediumSeverity.String()][Unchecked.String()] = 0 + result.Risks[MediumSeverity.String()][InDiscussion.String()] = 0 + result.Risks[MediumSeverity.String()][Accepted.String()] = 0 + result.Risks[MediumSeverity.String()][InProgress.String()] = 0 + result.Risks[MediumSeverity.String()][Mitigated.String()] = 0 + result.Risks[MediumSeverity.String()][FalsePositive.String()] = 0 + result.Risks[LowSeverity.String()] = make(map[string]int) + result.Risks[LowSeverity.String()][Unchecked.String()] = 0 + result.Risks[LowSeverity.String()][InDiscussion.String()] = 0 + result.Risks[LowSeverity.String()][Accepted.String()] = 0 + result.Risks[LowSeverity.String()][InProgress.String()] 
= 0 + result.Risks[LowSeverity.String()][Mitigated.String()] = 0 + result.Risks[LowSeverity.String()][FalsePositive.String()] = 0 + for _, risks := range parsedModel.GeneratedRisksByCategory { + for _, risk := range risks { + result.Risks[risk.Severity.String()][risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).String()]++ + } + } + return result +} diff --git a/pkg/security/types/shared_runtime.go b/pkg/security/types/shared_runtime.go new file mode 100644 index 00000000..c23873a6 --- /dev/null +++ b/pkg/security/types/shared_runtime.go @@ -0,0 +1,88 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "sort" +) + +type SharedRuntime struct { + Id string `json:"id,omitempty" yaml:"id,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + TechnicalAssetsRunning []string `json:"technical_assets_running,omitempty" yaml:"technical_assets_running,omitempty"` +} + +func (what SharedRuntime) IsTaggedWithAny(tags ...string) bool { + return containsCaseInsensitiveAny(what.Tags, tags...) 
+} + +func (what SharedRuntime) IsTaggedWithBaseTag(baseTag string) bool { + return IsTaggedWithBaseTag(what.Tags, baseTag) +} + +func (what SharedRuntime) HighestConfidentiality(model *ParsedModel) Confidentiality { + highest := Public + for _, id := range what.TechnicalAssetsRunning { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestConfidentiality(model) > highest { + highest = techAsset.HighestConfidentiality(model) + } + } + return highest +} + +func (what SharedRuntime) HighestIntegrity(model *ParsedModel) Criticality { + highest := Archive + for _, id := range what.TechnicalAssetsRunning { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestIntegrity(model) > highest { + highest = techAsset.HighestIntegrity(model) + } + } + return highest +} + +func (what SharedRuntime) HighestAvailability(model *ParsedModel) Criticality { + highest := Archive + for _, id := range what.TechnicalAssetsRunning { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestAvailability(model) > highest { + highest = techAsset.HighestAvailability(model) + } + } + return highest +} + +func (what SharedRuntime) TechnicalAssetWithHighestRAA(model *ParsedModel) TechnicalAsset { + result := model.TechnicalAssets[what.TechnicalAssetsRunning[0]] + for _, asset := range what.TechnicalAssetsRunning { + candidate := model.TechnicalAssets[asset] + if candidate.RAA > result.RAA { + result = candidate + } + } + return result +} + +// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: + +func SortedKeysOfSharedRuntime(model *ParsedModel) []string { + keys := make([]string, 0) + for k := range model.SharedRuntimes { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +type BySharedRuntimeTitleSort []SharedRuntime + +func (what BySharedRuntimeTitleSort) Len() int { return len(what) } +func (what BySharedRuntimeTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } +func (what 
/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// STRIDE classifies a risk by its STRIDE threat-modeling category
// (Spoofing, Tampering, Repudiation, Information Disclosure,
// Denial of Service, Elevation of Privilege).
type STRIDE int

const (
	Spoofing STRIDE = iota
	Tampering
	Repudiation
	InformationDisclosure
	DenialOfService
	ElevationOfPrivilege
)

// STRIDEValues returns all STRIDE values wrapped as TypeEnum for generic
// enum handling (listing, parsing, explanation output).
func STRIDEValues() []TypeEnum {
	return []TypeEnum{
		Spoofing,
		Tampering,
		Repudiation,
		InformationDisclosure,
		DenialOfService,
		ElevationOfPrivilege,
	}
}

// StrideTypeDescription maps each STRIDE value (by numeric index) to its
// serialized name and human-readable description. Order must match the
// const block above.
var StrideTypeDescription = [...]TypeDescription{
	{"spoofing", "Spoofing - Authenticity"},
	{"tampering", "Tampering - Integrity"},
	{"repudiation", "Repudiation - Non-repudiability"},
	{"information-disclosure", "Information disclosure - Confidentiality"},
	{"denial-of-service", "Denial of service - Availability"},
	{"elevation-of-privilege", "Elevation of privilege - Authorization"},
}

// ParseSTRIDE parses the exact serialized name (case-sensitive, surrounding
// whitespace trimmed) into a STRIDE value; returns an error for unknown input.
func ParseSTRIDE(value string) (stride STRIDE, err error) {
	value = strings.TrimSpace(value)
	for _, candidate := range STRIDEValues() {
		if candidate.String() == value {
			return candidate.(STRIDE), err
		}
	}
	return stride, errors.New("Unable to parse into type: " + value)
}

// String returns the serialized (kebab-case) name of the STRIDE value.
func (what STRIDE) String() string {
	// NOTE: maintain list also in schema.json for validation in IDEs
	return StrideTypeDescription[what].Name
}

// Explain returns the human-readable description of the STRIDE value.
func (what STRIDE) Explain() string {
	return StrideTypeDescription[what].Description
}

// Title returns the display title (proper capitalization) of the STRIDE value.
func (what STRIDE) Title() string {
	return [...]string{"Spoofing", "Tampering", "Repudiation", "Information Disclosure", "Denial of Service", "Elevation of Privilege"}[what]
}

// MarshalJSON serializes the value as its string name.
func (what STRIDE) MarshalJSON() ([]byte, error) {
	return json.Marshal(what.String())
}

// UnmarshalJSON parses a JSON string into a STRIDE value via the
// case-insensitive find lookup.
func (what *STRIDE) UnmarshalJSON(data []byte) error {
	var text string
	unmarshalError := json.Unmarshal(data, &text)
	if unmarshalError != nil {
		return unmarshalError
	}

	value, findError := what.find(text)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}
// MarshalYAML serializes the value as its string name.
func (what STRIDE) MarshalYAML() (interface{}, error) {
	return what.String(), nil
}

// UnmarshalYAML parses a YAML scalar node into a STRIDE value via the
// case-insensitive find lookup.
func (what *STRIDE) UnmarshalYAML(node *yaml.Node) error {
	value, findError := what.find(node.Value)
	if findError != nil {
		return findError
	}

	*what = value
	return nil
}

// find resolves a serialized name to its STRIDE value, case-insensitively.
// Note: this is more lenient than ParseSTRIDE, which compares case-sensitively.
func (what STRIDE) find(value string) (STRIDE, error) {
	for index, description := range StrideTypeDescription {
		if strings.EqualFold(value, description.Name) {
			return STRIDE(index), nil
		}
	}

	return STRIDE(0), fmt.Errorf("unknown STRIDE value %q", value)
}

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseStrideTest describes one table-driven case for TestParseStride.
type ParseStrideTest struct {
	input         string // raw value handed to ParseSTRIDE
	expected      STRIDE // expected parse result (zero value when an error is expected)
	expectedError error  // expected error (nil for the happy path)
}

// TestParseStride verifies ParseSTRIDE for every valid serialized name and
// for an unknown input.
func TestParseStride(t *testing.T) {
	testCases := map[string]ParseStrideTest{
		"spoofing": {
			input:    "spoofing",
			expected: Spoofing,
		},
		"tampering": {
			input:    "tampering",
			expected: Tampering,
		},
		"repudiation": {
			input:    "repudiation",
			expected: Repudiation,
		},
		"information-disclosure": {
			input:    "information-disclosure",
			expected: InformationDisclosure,
		},
		"denial-of-service": {
			input:    "denial-of-service",
			expected: DenialOfService,
		},
		"elevation-of-privilege": {
			input:    "elevation-of-privilege",
			expected: ElevationOfPrivilege,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseSTRIDE(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}
/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"fmt"
	"sort"
)

// TechnicalAsset is a modeled system component (server, application, client,
// data store, ...) including its classification, the data it processes and
// stores, and its outgoing communication links.
type TechnicalAsset struct {
	Id                      string                   `json:"id,omitempty" yaml:"id,omitempty"`
	Title                   string                   `json:"title,omitempty" yaml:"title,omitempty"`
	Description             string                   `json:"description,omitempty" yaml:"description,omitempty"`
	Usage                   Usage                    `json:"usage,omitempty" yaml:"usage,omitempty"`
	Type                    TechnicalAssetType       `json:"type,omitempty" yaml:"type,omitempty"`
	Size                    TechnicalAssetSize       `json:"size,omitempty" yaml:"size,omitempty"`
	Technology              TechnicalAssetTechnology `json:"technology,omitempty" yaml:"technology,omitempty"`
	Machine                 TechnicalAssetMachine    `json:"machine,omitempty" yaml:"machine,omitempty"`
	Internet                bool                     `json:"internet,omitempty" yaml:"internet,omitempty"`
	MultiTenant             bool                     `json:"multi_tenant,omitempty" yaml:"multi_tenant,omitempty"`
	Redundant               bool                     `json:"redundant,omitempty" yaml:"redundant,omitempty"`
	CustomDevelopedParts    bool                     `json:"custom_developed_parts,omitempty" yaml:"custom_developed_parts,omitempty"`
	OutOfScope              bool                     `json:"out_of_scope,omitempty" yaml:"out_of_scope,omitempty"`
	UsedAsClientByHuman     bool                     `json:"used_as_client_by_human,omitempty" yaml:"used_as_client_by_human,omitempty"`
	Encryption              EncryptionStyle          `json:"encryption,omitempty" yaml:"encryption,omitempty"`
	JustificationOutOfScope string                   `json:"justification_out_of_scope,omitempty" yaml:"justification_out_of_scope,omitempty"`
	Owner                   string                   `json:"owner,omitempty" yaml:"owner,omitempty"`
	Confidentiality         Confidentiality          `json:"confidentiality,omitempty" yaml:"confidentiality,omitempty"`
	Integrity               Criticality              `json:"integrity,omitempty" yaml:"integrity,omitempty"`
	Availability            Criticality              `json:"availability,omitempty" yaml:"availability,omitempty"`
	JustificationCiaRating  string                   `json:"justification_cia_rating,omitempty" yaml:"justification_cia_rating,omitempty"`
	Tags                    []string                 `json:"tags,omitempty" yaml:"tags,omitempty"`
	DataAssetsProcessed     []string                 `json:"data_assets_processed,omitempty" yaml:"data_assets_processed,omitempty"`
	DataAssetsStored        []string                 `json:"data_assets_stored,omitempty" yaml:"data_assets_stored,omitempty"`
	DataFormatsAccepted     []DataFormat             `json:"data_formats_accepted,omitempty" yaml:"data_formats_accepted,omitempty"`
	CommunicationLinks      []CommunicationLink      `json:"communication_links,omitempty" yaml:"communication_links,omitempty"`
	DiagramTweakOrder       int                      `json:"diagram_tweak_order,omitempty" yaml:"diagram_tweak_order,omitempty"`
	// will be set by separate calculation step:
	RAA float64 `json:"raa,omitempty" yaml:"raa,omitempty"`
}
`json:"integrity,omitempty" yaml:"integrity,omitempty"` + Availability Criticality `json:"availability,omitempty" yaml:"availability,omitempty"` + JustificationCiaRating string `json:"justification_cia_rating,omitempty" yaml:"justification_cia_rating,omitempty"` + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + DataAssetsProcessed []string `json:"data_assets_processed,omitempty" yaml:"data_assets_processed,omitempty"` + DataAssetsStored []string `json:"data_assets_stored,omitempty" yaml:"data_assets_stored,omitempty"` + DataFormatsAccepted []DataFormat `json:"data_formats_accepted,omitempty" yaml:"data_formats_accepted,omitempty"` + CommunicationLinks []CommunicationLink `json:"communication_links,omitempty" yaml:"communication_links,omitempty"` + DiagramTweakOrder int `json:"diagram_tweak_order,omitempty" yaml:"diagram_tweak_order,omitempty"` + // will be set by separate calculation step: + RAA float64 `json:"raa,omitempty" yaml:"raa,omitempty"` +} + +func (what TechnicalAsset) IsTaggedWithAny(tags ...string) bool { + return containsCaseInsensitiveAny(what.Tags, tags...) +} + +func (what TechnicalAsset) IsTaggedWithBaseTag(baseTag string) bool { + return IsTaggedWithBaseTag(what.Tags, baseTag) +} + +// first use the tag(s) of the asset itself, then their trust boundaries (recursively up) and then their shared runtime + +func (what TechnicalAsset) IsTaggedWithAnyTraversingUp(model *ParsedModel, tags ...string) bool { + if containsCaseInsensitiveAny(what.Tags, tags...) { + return true + } + tbID := what.GetTrustBoundaryId(model) + if len(tbID) > 0 { + if model.TrustBoundaries[tbID].IsTaggedWithAnyTraversingUp(model, tags...) { + return true + } + } + for _, sr := range model.SharedRuntimes { + if contains(sr.TechnicalAssetsRunning, what.Id) && sr.IsTaggedWithAny(tags...) 
// IsSameTrustBoundary reports whether this asset and the other asset live in
// the same directly-containing trust boundary.
// NOTE(review): assets outside any boundary get the zero-value TrustBoundary
// from the map lookup, so two such assets compare as "same" (both Ids empty)
// — confirm this is intended.
func (what TechnicalAsset) IsSameTrustBoundary(parsedModel *ParsedModel, otherAssetId string) bool {
	trustBoundaryOfMyAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
	trustBoundaryOfOtherAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
	return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id
}

// IsSameExecutionEnvironment reports whether both assets sit in the same
// execution-environment trust boundary (false when either boundary is of a
// different type).
func (what TechnicalAsset) IsSameExecutionEnvironment(parsedModel *ParsedModel, otherAssetId string) bool {
	trustBoundaryOfMyAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
	trustBoundaryOfOtherAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
	if trustBoundaryOfMyAsset.Type == ExecutionEnvironment && trustBoundaryOfOtherAsset.Type == ExecutionEnvironment {
		return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id
	}
	return false
}

// IsSameTrustBoundaryNetworkOnly compares the nearest *network* trust
// boundaries of both assets: when an asset's direct boundary is not a network
// boundary (e.g. an execution environment), its parent boundary is used.
func (what TechnicalAsset) IsSameTrustBoundaryNetworkOnly(parsedModel *ParsedModel, otherAssetId string) bool {
	trustBoundaryOfMyAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
	if !trustBoundaryOfMyAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
		trustBoundaryOfMyAsset = parsedModel.TrustBoundaries[trustBoundaryOfMyAsset.ParentTrustBoundaryID(parsedModel)]
	}
	trustBoundaryOfOtherAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
	if !trustBoundaryOfOtherAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
		trustBoundaryOfOtherAsset = parsedModel.TrustBoundaries[trustBoundaryOfOtherAsset.ParentTrustBoundaryID(parsedModel)]
	}
	return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id
}

// HighestSensitivityScore sums the attacker-attractiveness contributions of
// the asset's own CIA classification (not the processed data assets).
func (what TechnicalAsset) HighestSensitivityScore() float64 {
	return what.Confidentiality.AttackerAttractivenessForAsset() +
		what.Integrity.AttackerAttractivenessForAsset() +
		what.Availability.AttackerAttractivenessForAsset()
}
what.Availability.AttackerAttractivenessForAsset() +} + +func (what TechnicalAsset) HighestConfidentiality(parsedModel *ParsedModel) Confidentiality { + highest := what.Confidentiality + for _, dataId := range what.DataAssetsProcessed { + dataAsset := parsedModel.DataAssets[dataId] + if dataAsset.Confidentiality > highest { + highest = dataAsset.Confidentiality + } + } + return highest +} + +func (what TechnicalAsset) DataAssetsProcessedSorted(parsedModel *ParsedModel) []DataAsset { + result := make([]DataAsset, 0) + for _, assetID := range what.DataAssetsProcessed { + result = append(result, parsedModel.DataAssets[assetID]) + } + sort.Sort(ByDataAssetTitleSort(result)) + return result +} + +func (what TechnicalAsset) DataAssetsStoredSorted(parsedModel *ParsedModel) []DataAsset { + result := make([]DataAsset, 0) + for _, assetID := range what.DataAssetsStored { + result = append(result, parsedModel.DataAssets[assetID]) + } + sort.Sort(ByDataAssetTitleSort(result)) + return result +} + +func (what TechnicalAsset) DataFormatsAcceptedSorted() []DataFormat { + result := make([]DataFormat, 0) + for _, format := range what.DataFormatsAccepted { + result = append(result, format) + } + sort.Sort(ByDataFormatAcceptedSort(result)) + return result +} + +func (what TechnicalAsset) CommunicationLinksSorted() []CommunicationLink { + result := make([]CommunicationLink, 0) + for _, format := range what.CommunicationLinks { + result = append(result, format) + } + sort.Sort(ByTechnicalCommunicationLinkTitleSort(result)) + return result +} + +func (what TechnicalAsset) HighestIntegrity(model *ParsedModel) Criticality { + highest := what.Integrity + for _, dataId := range what.DataAssetsProcessed { + dataAsset := model.DataAssets[dataId] + if dataAsset.Integrity > highest { + highest = dataAsset.Integrity + } + } + return highest +} + +func (what TechnicalAsset) HighestAvailability(model *ParsedModel) Criticality { + highest := what.Availability + for _, dataId := range 
// HasDirectConnection reports whether a communication link exists between
// this asset and the other asset, in either direction.
func (what TechnicalAsset) HasDirectConnection(parsedModel *ParsedModel, otherAssetId string) bool {
	for _, dataFlow := range parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] {
		if dataFlow.SourceId == otherAssetId {
			return true
		}
	}
	// check both directions, hence two times, just reversed
	for _, dataFlow := range parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[otherAssetId] {
		if dataFlow.SourceId == what.Id {
			return true
		}
	}
	return false
}

// GeneratedRisks collects all generated risks whose most relevant technical
// asset is this one, ordered by category and then sorted by severity.
// NOTE(review): prints a diagnostic to stdout when no risk categories exist.
func (what TechnicalAsset) GeneratedRisks(parsedModel *ParsedModel) []Risk {
	resultingRisks := make([]Risk, 0)
	if len(SortedRiskCategories(parsedModel)) == 0 {
		fmt.Println("Uh, strange, no risks generated (yet?) and asking for them by tech asset...")
	}
	for _, category := range SortedRiskCategories(parsedModel) {
		risks := SortedRisksOfCategory(parsedModel, category)
		for _, risk := range risks {
			if risk.MostRelevantTechnicalAssetId == what.Id {
				resultingRisks = append(resultingRisks, risk)
			}
		}
	}
	SortByRiskSeverity(resultingRisks, parsedModel)
	return resultingRisks
}

/*
func (what TechnicalAsset) HighestRiskSeverity() RiskSeverity {
	highest := Low
	for _, risk := range what.GeneratedRisks() {
		if risk.Severity > highest {
			highest = risk.Severity
		}
	}
	return highest
}
*/

// IsZero reports whether this is the zero-value asset (empty id), e.g. from a
// failed map lookup.
func (what TechnicalAsset) IsZero() bool {
	return len(what.Id) == 0
}

// ProcessesOrStoresDataAsset reports whether the given data asset is handled
// by this technical asset.
// NOTE(review): only DataAssetsProcessed is checked despite the name —
// presumably stored data assets are also added to the processed list during
// model parsing; confirm against the parser.
func (what TechnicalAsset) ProcessesOrStoresDataAsset(dataAssetId string) bool {
	return contains(what.DataAssetsProcessed, dataAssetId)
}

/*
// Loops over all data assets (stored and processed by this technical asset) and determines for each
// data asset, how many percentage of the data risk is reduced when this technical asset has all risks mitigated.
// Example: This means if the data asset is loosing a risk and thus getting from red to amber it counts as 1.
// Other example: When only one out of four lines (see data risk mapping) leading to red tech assets are removed by
// the mitigations, then this counts as 0.25. The overall sum is returned.
func (what TechnicalAsset) QuickWins() float64 {
	result := 0.0
	uniqueDataAssetsStoredAndProcessed := make(map[string]interface{})
	for _, dataAssetId := range what.DataAssetsStored {
		uniqueDataAssetsStoredAndProcessed[dataAssetId] = true
	}
	for _, dataAssetId := range what.DataAssetsProcessed {
		uniqueDataAssetsStoredAndProcessed[dataAssetId] = true
	}
	highestSeverity := HighestSeverityStillAtRisk(what.GeneratedRisks())
	for dataAssetId, _ := range uniqueDataAssetsStoredAndProcessed {
		dataAsset := ParsedModelRoot.DataAssets[dataAssetId]
		if dataAsset.IdentifiedRiskSeverityStillAtRisk() <= highestSeverity {
			howManySameLevelCausingUsagesOfThisData := 0.0
			for techAssetId, risks := range dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId() {
				if !ParsedModelRoot.TechnicalAssets[techAssetId].OutOfScope {
					for _, risk := range risks {
						if len(risk.MostRelevantTechnicalAssetId) > 0 { // T O D O caching of generated risks inside the method?
							if HighestSeverityStillAtRisk(ParsedModelRoot.TechnicalAssets[risk.MostRelevantTechnicalAssetId].GeneratedRisks()) == highestSeverity {
								howManySameLevelCausingUsagesOfThisData++
								break
							}
						}
					}
				}
			}
			if howManySameLevelCausingUsagesOfThisData > 0 {
				result += 1.0 / howManySameLevelCausingUsagesOfThisData
			}
		}
	}
	return result
}
*/

// GetTrustBoundaryId returns the id of the trust boundary directly containing
// this asset, or "" when the asset is not inside any boundary.
func (what TechnicalAsset) GetTrustBoundaryId(model *ParsedModel) string {
	for _, trustBoundary := range model.TrustBoundaries {
		for _, techAssetInside := range trustBoundary.TechnicalAssetsInside {
			if techAssetInside == what.Id {
				return trustBoundary.Id
			}
		}
	}
	return ""
}

// SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk sorts assets by highest
// still-at-risk severity (descending), breaking ties by title, and pushes
// out-of-scope assets to the end.
// NOTE(review): when severities are equal and exactly one side has zero
// remaining risks, the early returns below bypass the out-of-scope overrides
// at the bottom — confirm this asymmetry is intended before restructuring.
func SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk(assets []TechnicalAsset, parsedModel *ParsedModel) {
	sort.Slice(assets, func(i, j int) bool {
		risksLeft := ReduceToOnlyStillAtRisk(parsedModel, assets[i].GeneratedRisks(parsedModel))
		risksRight := ReduceToOnlyStillAtRisk(parsedModel, assets[j].GeneratedRisks(parsedModel))
		highestSeverityLeft := HighestSeverityStillAtRisk(parsedModel, risksLeft)
		highestSeverityRight := HighestSeverityStillAtRisk(parsedModel, risksRight)
		var result bool
		if highestSeverityLeft == highestSeverityRight {
			if len(risksLeft) == 0 && len(risksRight) > 0 {
				return false
			} else if len(risksLeft) > 0 && len(risksRight) == 0 {
				return true
			} else {
				result = assets[i].Title < assets[j].Title
			}
		} else {
			result = highestSeverityLeft > highestSeverityRight
		}
		// out-of-scope assets always sort after in-scope ones (overrides the above)
		if assets[i].OutOfScope && assets[j].OutOfScope {
			result = assets[i].Title < assets[j].Title
		} else if assets[i].OutOfScope {
			result = false
		} else if assets[j].OutOfScope {
			result = true
		}
		return result
	})
}

// ByTechnicalAssetRAAAndTitleSort sorts by RAA descending, title ascending on ties.
type ByTechnicalAssetRAAAndTitleSort []TechnicalAsset

func (what ByTechnicalAssetRAAAndTitleSort) Len() int      { return len(what) }
func (what ByTechnicalAssetRAAAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByTechnicalAssetRAAAndTitleSort) Less(i, j int) bool {
	raaLeft := what[i].RAA
	raaRight := what[j].RAA
	if raaLeft == raaRight {
		return what[i].Title < what[j].Title
	}
	return raaLeft > raaRight
}
/*
type ByTechnicalAssetQuickWinsAndTitleSort []TechnicalAsset

func (what ByTechnicalAssetQuickWinsAndTitleSort) Len() int { return len(what) }
func (what ByTechnicalAssetQuickWinsAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByTechnicalAssetQuickWinsAndTitleSort) Less(i, j int) bool {
	qwLeft := what[i].QuickWins()
	qwRight := what[j].QuickWins()
	if qwLeft == qwRight {
		return what[i].Title < what[j].Title
	}
	return qwLeft > qwRight
}
*/

// ByTechnicalAssetTitleSort sorts technical assets alphabetically by title.
type ByTechnicalAssetTitleSort []TechnicalAsset

func (what ByTechnicalAssetTitleSort) Len() int      { return len(what) }
func (what ByTechnicalAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByTechnicalAssetTitleSort) Less(i, j int) bool {
	return what[i].Title < what[j].Title
}

// ByOrderAndIdSort sorts by the diagram tweak order, breaking ties by id
// (descending) for a stable diagram layout.
type ByOrderAndIdSort []TechnicalAsset

func (what ByOrderAndIdSort) Len() int      { return len(what) }
func (what ByOrderAndIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what ByOrderAndIdSort) Less(i, j int) bool {
	if what[i].DiagramTweakOrder == what[j].DiagramTweakOrder {
		return what[i].Id > what[j].Id
	}
	return what[i].DiagramTweakOrder < what[j].DiagramTweakOrder
}

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// TechnicalAssetMachine classifies how a technical asset is hosted.
type TechnicalAssetMachine int

const (
	Physical TechnicalAssetMachine = iota
	Virtual
	Container
	Serverless
)

// TechnicalAssetMachineValues returns all machine values wrapped as TypeEnum
// for generic enum handling.
func TechnicalAssetMachineValues() []TypeEnum {
	return []TypeEnum{
		Physical,
		Virtual,
		Container,
		Serverless,
	}
}
Serverless, + } +} + +var TechnicalAssetMachineTypeDescription = [...]TypeDescription{ + {"physical", "A physical machine"}, + {"virtual", "A virtual machine"}, + {"container", "A container"}, + {"serverless", "A serverless application"}, +} + +func ParseTechnicalAssetMachine(value string) (technicalAssetMachine TechnicalAssetMachine, err error) { + value = strings.TrimSpace(value) + for _, candidate := range TechnicalAssetMachineValues() { + if candidate.String() == value { + return candidate.(TechnicalAssetMachine), err + } + } + return technicalAssetMachine, errors.New("Unable to parse into type: " + value) +} + +func (what TechnicalAssetMachine) String() string { + return TechnicalAssetMachineTypeDescription[what].Name +} + +func (what TechnicalAssetMachine) Explain() string { + return TechnicalAssetMachineTypeDescription[what].Description +} + +func (what TechnicalAssetMachine) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *TechnicalAssetMachine) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetMachine) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *TechnicalAssetMachine) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetMachine) find(value string) (TechnicalAssetMachine, error) { + for index, description := range TechnicalAssetMachineTypeDescription { + if strings.EqualFold(value, description.Name) { + return TechnicalAssetMachine(index), nil + } + } + + return TechnicalAssetMachine(0), fmt.Errorf("unknown technical asset machine value %q", value) +} diff --git 
/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseTechnicalAssetMachineTest describes one table-driven case for
// TestParseTechnicalAssetMachine.
type ParseTechnicalAssetMachineTest struct {
	input         string                // raw value handed to the parser
	expected      TechnicalAssetMachine // expected result (zero value on error cases)
	expectedError error                 // expected error (nil for the happy path)
}

// TestParseTechnicalAssetMachine verifies ParseTechnicalAssetMachine for
// every valid serialized name and for an unknown input.
func TestParseTechnicalAssetMachine(t *testing.T) {
	testCases := map[string]ParseTechnicalAssetMachineTest{
		"physical": {
			input:    "physical",
			expected: Physical,
		},
		"virtual": {
			input:    "virtual",
			expected: Virtual,
		},
		"container": {
			input:    "container",
			expected: Container,
		},
		"serverless": {
			input:    "serverless",
			expected: Serverless,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseTechnicalAssetMachine(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// TechnicalAssetSize classifies the granularity of a technical asset.
type TechnicalAssetSize int

const (
	System TechnicalAssetSize = iota
	Service
	Application
	Component
)

// TechnicalAssetSizeValues returns all size values wrapped as TypeEnum for
// generic enum handling.
func TechnicalAssetSizeValues() []TypeEnum {
	return []TypeEnum{
		System,
		Service,
		Application,
		Component,
	}
}

// TechnicalAssetSizeDescription maps each size value (by index) to its
// serialized name and human-readable description; order must match the
// const block above.
var TechnicalAssetSizeDescription = [...]TypeDescription{
	{"system", "A system consists of several services"},
	{"service", "A specific service (web, mail, ...)"},
	{"application", "A single application"},
	{"component", "A component of an application (smaller unit like a microservice)"},
}
...)"}, + {"application", "A single application"}, + {"component", "A component of an application (smaller unit like a microservice)"}, +} + +func (what TechnicalAssetSize) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return TechnicalAssetSizeDescription[what].Name +} + +func (what TechnicalAssetSize) Explain() string { + return TechnicalAssetSizeDescription[what].Description +} + +func ParseTechnicalAssetSize(value string) (technicalAssetSize TechnicalAssetSize, err error) { + value = strings.TrimSpace(value) + for _, candidate := range TechnicalAssetSizeValues() { + if candidate.String() == value { + return candidate.(TechnicalAssetSize), err + } + } + return technicalAssetSize, errors.New("Unable to parse into type: " + value) +} + +func (what TechnicalAssetSize) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *TechnicalAssetSize) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetSize) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *TechnicalAssetSize) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetSize) find(value string) (TechnicalAssetSize, error) { + for index, description := range TechnicalAssetSizeDescription { + if strings.EqualFold(value, description.Name) { + return TechnicalAssetSize(index), nil + } + } + + return TechnicalAssetSize(0), fmt.Errorf("unknown technical asset size value %q", value) +} diff --git a/pkg/security/types/technical_asset_size_test.go b/pkg/security/types/technical_asset_size_test.go new file mode 100644 index 
/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// ParseTechnicalAssetSizeTest describes one table-driven case for
// TestParseTechnicalAssetSize.
type ParseTechnicalAssetSizeTest struct {
	input         string             // raw value handed to the parser
	expected      TechnicalAssetSize // expected result (zero value on error cases)
	expectedError error              // expected error (nil for the happy path)
}

// TestParseTechnicalAssetSize verifies ParseTechnicalAssetSize for every
// valid serialized name and for an unknown input.
func TestParseTechnicalAssetSize(t *testing.T) {
	testCases := map[string]ParseTechnicalAssetSizeTest{
		"service": {
			input:    "service",
			expected: Service,
		},
		"system": {
			input:    "system",
			expected: System,
		},
		"application": {
			input:    "application",
			expected: Application,
		},
		"component": {
			input:    "component",
			expected: Component,
		},
		"unknown": {
			input:         "unknown",
			expectedError: errors.New("Unable to parse into type: unknown"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual, err := ParseTechnicalAssetSize(testCase.input)

			assert.Equal(t, testCase.expected, actual)
			assert.Equal(t, testCase.expectedError, err)
		})
	}
}

/*
Copyright © 2023 NAME HERE
*/

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"gopkg.in/yaml.v3"
	"strings"
)

// TechnicalAssetTechnology classifies the kind of technology a technical
// asset represents; many risk rules dispatch on these values.
type TechnicalAssetTechnology int

const (
	UnknownTechnology TechnicalAssetTechnology = iota
	ClientSystem
	Browser
	Desktop
	MobileApp
	DevOpsClient
	WebServer
	WebApplication
	ApplicationServer
	Database
	FileServer
	LocalFileSystem
	ERP
	CMS
	WebServiceREST
	WebServiceSOAP
	EJB
	SearchIndex
	SearchEngine
	ServiceRegistry
	ReverseProxy
	LoadBalancer
	BuildPipeline
	SourcecodeRepository
	ArtifactRegistry
	CodeInspectionPlatform
	Monitoring
	LDAPServer
	ContainerPlatform
	BatchProcessing
	EventListener
	IdentityProvider
	IdentityStoreLDAP
	IdentityStoreDatabase
	Tool
	CLI
	Task
	Function
	Gateway // TODO rename to API-Gateway to be more clear?
	IoTDevice
	MessageQueue
	StreamProcessing
	ServiceMesh
	DataLake
	BigDataPlatform
	ReportEngine
	AI
	MailServer
	Vault
	HSM
	WAF
	IDS
	IPS
	Scheduler
	Mainframe
	BlockStorage
	Library
)
// TechnicalAssetTechnologyValues returns all technology values wrapped as
// TypeEnum for generic enum handling; order must match the const block.
func TechnicalAssetTechnologyValues() []TypeEnum {
	return []TypeEnum{
		UnknownTechnology,
		ClientSystem,
		Browser,
		Desktop,
		MobileApp,
		DevOpsClient,
		WebServer,
		WebApplication,
		ApplicationServer,
		Database,
		FileServer,
		LocalFileSystem,
		ERP,
		CMS,
		WebServiceREST,
		WebServiceSOAP,
		EJB,
		SearchIndex,
		SearchEngine,
		ServiceRegistry,
		ReverseProxy,
		LoadBalancer,
		BuildPipeline,
		SourcecodeRepository,
		ArtifactRegistry,
		CodeInspectionPlatform,
		Monitoring,
		LDAPServer,
		ContainerPlatform,
		BatchProcessing,
		EventListener,
		IdentityProvider,
		IdentityStoreLDAP,
		IdentityStoreDatabase,
		Tool,
		CLI,
		Task,
		Function,
		Gateway,
		IoTDevice,
		MessageQueue,
		StreamProcessing,
		ServiceMesh,
		DataLake,
		BigDataPlatform,
		ReportEngine,
		AI,
		MailServer,
		Vault,
		HSM,
		WAF,
		IDS,
		IPS,
		Scheduler,
		Mainframe,
		BlockStorage,
		Library,
	}
}

// TechnicalAssetTechnologyTypeDescription maps each technology value (by
// index) to its serialized name and human-readable description; order must
// match the const block.
var TechnicalAssetTechnologyTypeDescription = [...]TypeDescription{
	{"unknown-technology", "Unknown technology"},
	{"client-system", "A client system"},
	{"browser", "A web browser"},
	{"desktop", "A desktop system (or laptop)"},
	{"mobile-app", "A mobile app (smartphone, tablet)"},
	{"devops-client", "A client used for DevOps"},
	{"web-server", "A web server"},
	{"web-application", "A web application"},
	{"application-server", "An application server (Apache Tomcat, ...)"},
	{"database", "A database"},
	{"file-server", "A file server"},
	{"local-file-system", "The local file system"},
	{"erp", "Enterprise-Resource-Planning"},
	{"cms", "Content Management System"},
	{"web-service-rest", "A REST web service (API)"},
	{"web-service-soap", "A SOAP web service (API)"},
	{"ejb", "Jakarta Enterprise Beans fka Enterprise JavaBeans"},
	{"search-index", "The index database of a search engine"},
	{"search-engine", "A search engine"},
	{"service-registry", "A central place where data schemas can be found and distributed"},
	{"reverse-proxy", "A proxy hiding internal infrastructure from caller making requests. Can also reduce load"},
	{"load-balancer", "A load balancer directing incoming requests to available internal infrastructure"},
	{"build-pipeline", "A software build pipeline"},
	{"sourcecode-repository", "Git or similar"},
	{"artifact-registry", "A registry to store build artifacts"},
	{"code-inspection-platform", "(Static) Code Analysis)"},
	{"monitoring", "A monitoring system (SIEM, logs)"},
	{"ldap-server", "A LDAP server"},
	{"container-platform", "A platform for hosting and executing containers"},
	{"batch-processing", "A set of tools automatically processing data"},
	{"event-listener", "An event listener waiting to be triggered and spring to action"},
	{"identity-provider", "A authentication provider"},
	{"identity-store-ldap", "Authentication data as LDAP"},
	{"identity-store-database", "Authentication data as database"},
	{"tool", "A specific tool"},
	// NOTE(review): this entry corresponds to the CLI enum value, so
	// CLI.String() returns "threagile" rather than "cli" — looks like a
	// mis-named serialization value, but existing models may depend on it;
	// confirm against schema.json before changing.
	{"threagile", "A command line tool"},
	{"task", "A specific task"},
	{"function", "A specific function (maybe RPC ?)"},
	{"gateway", "A gateway connecting two systems or trust boundaries"},
	{"iot-device", "An IoT device"},
	{"message-queue", "A message queue (like MQTT)"},
	{"stream-processing", "Data stream processing"},
	{"service-mesh", "Infrastructure for service-to-service communication"},
	{"data-lake", "A huge database"},
	{"big-data-platform", "Storage for big data"},
	{"report-engine", "Software for report generation"},
	{"ai", "An Artificial Intelligence service"},
	{"mail-server", "A Mail server"},
	{"vault", "Encryption and key management"},
	{"hsm", "Hardware Security Module"},
	{"waf", "Web Application Firewall"},
	{"ids", "Intrusion Detection System"},
	{"ips", "Intrusion Prevention System"},
	{"scheduler", "Scheduled tasks"},
	{"mainframe", "A central, big computer"},
	{"block-storage", "SAN or similar central file storage"},
	{"library", "A software library"},
}

// String returns the serialized name of the technology value.
func (what TechnicalAssetTechnology) String() string {
	// NOTE: maintain list also in schema.json for validation in IDEs
	return TechnicalAssetTechnologyTypeDescription[what].Name
}

// Explain returns the human-readable description of the technology value.
func (what TechnicalAssetTechnology) Explain() string {
	return TechnicalAssetTechnologyTypeDescription[what].Description
}

// ParseTechnicalAssetTechnology parses the exact serialized name
// (case-sensitive, whitespace trimmed) into a technology value.
func ParseTechnicalAssetTechnology(value string) (technicalAssetTechnology TechnicalAssetTechnology, err error) {
	value = strings.TrimSpace(value)
	for _, candidate := range TechnicalAssetTechnologyValues() {
		if candidate.String() == value {
			return candidate.(TechnicalAssetTechnology), err
		}
	}
	return technicalAssetTechnology, errors.New("Unable to parse into type: " + value)
}

// IsWebApplication reports whether the technology serves web UIs to users.
func (what TechnicalAssetTechnology) IsWebApplication() bool {
	return what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP || what == CMS || what == IdentityProvider || what == ReportEngine
}

// IsWebService reports whether the technology is a machine-facing web API.
func (what TechnicalAssetTechnology) IsWebService() bool {
	return what == WebServiceREST || what == WebServiceSOAP
}

// IsIdentityRelated reports whether the technology handles identity data.
func (what TechnicalAssetTechnology) IsIdentityRelated() bool {
	return what == IdentityProvider || what == IdentityStoreLDAP || what == IdentityStoreDatabase
}

// IsSecurityControlRelated reports whether the technology is itself a
// security control (and thus treated differently by some risk rules).
func (what TechnicalAssetTechnology) IsSecurityControlRelated() bool {
	return what == Vault || what == HSM || what == WAF || what == IDS || what == IPS
}

// IsUnprotectedCommunicationsTolerated reports whether unencrypted links are
// tolerated for this technology (passive security infrastructure).
func (what TechnicalAssetTechnology) IsUnprotectedCommunicationsTolerated() bool {
	return what == Monitoring || what == IDS || what == IPS
}

// IsUnnecessaryDataTolerated reports whether receiving data it does not need
// is tolerated for this technology (passive security infrastructure).
func (what TechnicalAssetTechnology) IsUnnecessaryDataTolerated() bool {
	return what == Monitoring || what == IDS || what == IPS
}
+} + +func (what TechnicalAssetTechnology) IsCloseToHighValueTargetsTolerated() bool { + return what == Monitoring || what == IDS || what == IPS || what == LoadBalancer || what == ReverseProxy +} + +func (what TechnicalAssetTechnology) IsClient() bool { + return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == IoTDevice +} + +func (what TechnicalAssetTechnology) IsUsuallyAbleToPropagateIdentityToOutgoingTargets() bool { + return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || + what == DevOpsClient || what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP || + what == CMS || what == WebServiceREST || what == WebServiceSOAP || what == EJB || + what == SearchEngine || what == ReverseProxy || what == LoadBalancer || what == IdentityProvider || + what == Tool || what == CLI || what == Task || what == Function || what == Gateway || + what == IoTDevice || what == MessageQueue || what == ServiceMesh || what == ReportEngine || what == WAF || what == Library + +} + +func (what TechnicalAssetTechnology) IsLessProtectedType() bool { + return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == WebServer || what == WebApplication || what == ApplicationServer || what == CMS || + what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == BuildPipeline || what == SourcecodeRepository || + what == ArtifactRegistry || what == CodeInspectionPlatform || what == Monitoring || what == IoTDevice || what == AI || what == MailServer || what == Scheduler || + what == Mainframe +} + +func (what TechnicalAssetTechnology) IsUsuallyProcessingEndUserRequests() bool { + return what == WebServer || what == WebApplication || what == ApplicationServer || what == ERP || what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == ReportEngine +} + +func (what 
TechnicalAssetTechnology) IsUsuallyStoringEndUserData() bool { + return what == Database || what == ERP || what == FileServer || what == LocalFileSystem || what == BlockStorage || what == MailServer || what == StreamProcessing || what == MessageQueue +} + +func (what TechnicalAssetTechnology) IsExclusivelyFrontendRelated() bool { + return what == ClientSystem || what == Browser || what == Desktop || what == MobileApp || what == DevOpsClient || what == CMS || what == ReverseProxy || what == WAF || what == LoadBalancer || what == Gateway || what == IoTDevice +} + +func (what TechnicalAssetTechnology) IsExclusivelyBackendRelated() bool { + return what == Database || what == IdentityProvider || what == IdentityStoreLDAP || what == IdentityStoreDatabase || what == ERP || what == WebServiceREST || what == WebServiceSOAP || what == EJB || what == SearchIndex || + what == SearchEngine || what == ContainerPlatform || what == BatchProcessing || what == EventListener || what == DataLake || what == BigDataPlatform || what == MessageQueue || + what == StreamProcessing || what == ServiceMesh || what == Vault || what == HSM || what == Scheduler || what == Mainframe || what == FileServer || what == BlockStorage +} + +func (what TechnicalAssetTechnology) IsDevelopmentRelevant() bool { + return what == BuildPipeline || what == SourcecodeRepository || what == ArtifactRegistry || what == CodeInspectionPlatform || what == DevOpsClient +} + +func (what TechnicalAssetTechnology) IsTrafficForwarding() bool { + return what == LoadBalancer || what == ReverseProxy || what == WAF +} + +func (what TechnicalAssetTechnology) IsEmbeddedComponent() bool { + return what == Library +} + +func (what TechnicalAssetTechnology) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *TechnicalAssetTechnology) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + 
value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetTechnology) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *TechnicalAssetTechnology) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetTechnology) find(value string) (TechnicalAssetTechnology, error) { + for index, description := range TechnicalAssetTechnologyTypeDescription { + if strings.EqualFold(value, description.Name) { + return TechnicalAssetTechnology(index), nil + } + } + + return TechnicalAssetTechnology(0), fmt.Errorf("unknown technical asset technology value %q", value) +} diff --git a/pkg/security/types/technical_asset_technology_test.go b/pkg/security/types/technical_asset_technology_test.go new file mode 100644 index 00000000..0f1fc086 --- /dev/null +++ b/pkg/security/types/technical_asset_technology_test.go @@ -0,0 +1,264 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseTechnicalAssetTechnologyTest struct { + input string + expected TechnicalAssetTechnology + expectedError error +} + +func TestParseTechnicalAssetTechnology(t *testing.T) { + testCases := map[string]ParseTechnicalAssetTechnologyTest{ + "unknown-technology": { + input: "unknown-technology", + expected: UnknownTechnology, + }, + "client-system": { + input: "client-system", + expected: ClientSystem, + }, + "browser": { + input: "browser", + expected: Browser, + }, + "desktop": { + input: "desktop", + expected: Desktop, + }, + "mobile-app": { + input: "mobile-app", + expected: MobileApp, + }, + "devops-client": { + input: "devops-client", + expected: DevOpsClient, + }, + "web-server": { + input: "web-server", + expected: WebServer, + }, + "web-application": { + input: 
"web-application", + expected: WebApplication, + }, + "application-server": { + input: "application-server", + expected: ApplicationServer, + }, + "database": { + input: "database", + expected: Database, + }, + "file-server": { + input: "file-server", + expected: FileServer, + }, + "local-file-system": { + input: "local-file-system", + expected: LocalFileSystem, + }, + "erp": { + input: "erp", + expected: ERP, + }, + "cms": { + input: "cms", + expected: CMS, + }, + "web-service-rest": { + input: "web-service-rest", + expected: WebServiceREST, + }, + "web-service-soap": { + input: "web-service-soap", + expected: WebServiceSOAP, + }, + "ejb": { + input: "ejb", + expected: EJB, + }, + "search-index": { + input: "search-index", + expected: SearchIndex, + }, + "search-engine": { + input: "search-engine", + expected: SearchEngine, + }, + "service-registry": { + input: "service-registry", + expected: ServiceRegistry, + }, + "reverse-proxy": { + input: "reverse-proxy", + expected: ReverseProxy, + }, + "load-balancer": { + input: "load-balancer", + expected: LoadBalancer, + }, + "build-pipeline": { + input: "build-pipeline", + expected: BuildPipeline, + }, + "sourcecode-repository": { + input: "sourcecode-repository", + expected: SourcecodeRepository, + }, + "artifact-registry": { + input: "artifact-registry", + expected: ArtifactRegistry, + }, + "code-inspection-platform": { + input: "code-inspection-platform", + expected: CodeInspectionPlatform, + }, + "monitoring": { + input: "monitoring", + expected: Monitoring, + }, + "ldap-server": { + input: "ldap-server", + expected: LDAPServer, + }, + "container-platform": { + input: "container-platform", + expected: ContainerPlatform, + }, + "batch-processing": { + input: "batch-processing", + expected: BatchProcessing, + }, + "event-listener": { + input: "event-listener", + expected: EventListener, + }, + "identity-provider": { + input: "identity-provider", + expected: IdentityProvider, + }, + "identity-store-ldap": { + input: 
"identity-store-ldap", + expected: IdentityStoreLDAP, + }, + "identity-store-database": { + input: "identity-store-database", + expected: IdentityStoreDatabase, + }, + "tool": { + input: "tool", + expected: Tool, + }, + "threagile": { + input: "threagile", + expected: CLI, + }, + "task": { + input: "task", + expected: Task, + }, + "function": { + input: "function", + expected: Function, + }, + "gateway": { + input: "gateway", + expected: Gateway, + }, + "iot-device": { + input: "iot-device", + expected: IoTDevice, + }, + "message-queue": { + input: "message-queue", + expected: MessageQueue, + }, + "stream-processing": { + input: "stream-processing", + expected: StreamProcessing, + }, + "service-mesh": { + input: "service-mesh", + expected: ServiceMesh, + }, + "data-lake": { + input: "data-lake", + expected: DataLake, + }, + "big-data-platform": { + input: "big-data-platform", + expected: BigDataPlatform, + }, + "report-engine": { + input: "report-engine", + expected: ReportEngine, + }, + "ai": { + input: "ai", + expected: AI, + }, + "mail-server": { + input: "mail-server", + expected: MailServer, + }, + "vault": { + input: "vault", + expected: Vault, + }, + "hsm": { + input: "hsm", + expected: HSM, + }, + "waf": { + input: "waf", + expected: WAF, + }, + "ids": { + input: "ids", + expected: IDS, + }, + "ips": { + input: "ips", + expected: IPS, + }, + "scheduler": { + input: "scheduler", + expected: Scheduler, + }, + "mainframe": { + input: "mainframe", + expected: Mainframe, + }, + "block-storage": { + input: "block-storage", + expected: BlockStorage, + }, + "library": { + input: "library", + expected: Library, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseTechnicalAssetTechnology(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } 
+} diff --git a/pkg/security/types/technical_asset_type.go b/pkg/security/types/technical_asset_type.go new file mode 100644 index 00000000..02f585c2 --- /dev/null +++ b/pkg/security/types/technical_asset_type.go @@ -0,0 +1,98 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type TechnicalAssetType int + +const ( + ExternalEntity TechnicalAssetType = iota + Process + Datastore +) + +func TechnicalAssetTypeValues() []TypeEnum { + return []TypeEnum{ + ExternalEntity, + Process, + Datastore, + } +} + +var TechnicalAssetTypeDescription = [...]TypeDescription{ + {"external-entity", "This asset is hosted and managed by a third party"}, + {"process", "A software process"}, + {"datastore", "This asset stores data"}, +} + +func (what TechnicalAssetType) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return TechnicalAssetTypeDescription[what].Name +} + +func (what TechnicalAssetType) Explain() string { + return TechnicalAssetTypeDescription[what].Description +} + +func ParseTechnicalAssetType(value string) (technicalAssetType TechnicalAssetType, err error) { + value = strings.TrimSpace(value) + for _, candidate := range TechnicalAssetTypeValues() { + if candidate.String() == value { + return candidate.(TechnicalAssetType), err + } + } + return technicalAssetType, errors.New("Unable to parse into type: " + value) +} + +func (what TechnicalAssetType) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *TechnicalAssetType) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetType) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what 
*TechnicalAssetType) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TechnicalAssetType) find(value string) (TechnicalAssetType, error) { + for index, description := range TechnicalAssetTypeDescription { + if strings.EqualFold(value, description.Name) { + return TechnicalAssetType(index), nil + } + } + + return TechnicalAssetType(0), fmt.Errorf("unknown technical asset type value %q", value) +} diff --git a/pkg/security/types/technical_asset_type_test.go b/pkg/security/types/technical_asset_type_test.go new file mode 100644 index 00000000..a61cecb2 --- /dev/null +++ b/pkg/security/types/technical_asset_type_test.go @@ -0,0 +1,48 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseTechnicalAssetTypeTest struct { + input string + expected TechnicalAssetType + expectedError error +} + +func TestParseTechnicalAssetType(t *testing.T) { + testCases := map[string]ParseTechnicalAssetTypeTest{ + "external-entity": { + input: "external-entity", + expected: ExternalEntity, + }, + "process": { + input: "process", + expected: Process, + }, + "datastore": { + input: "datastore", + expected: Datastore, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseTechnicalAssetType(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/trust_boundary.go b/pkg/security/types/trust_boundary.go new file mode 100644 index 00000000..15c1d8c7 --- /dev/null +++ b/pkg/security/types/trust_boundary.go @@ -0,0 +1,127 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "sort" +) + +type TrustBoundary 
struct { + Id string `json:"id,omitempty" yaml:"id,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Type TrustBoundaryType `json:"type,omitempty" yaml:"type,omitempty"` + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + TechnicalAssetsInside []string `json:"technical_assets_inside,omitempty" yaml:"technical_assets_inside,omitempty"` + TrustBoundariesNested []string `json:"trust_boundaries_nested,omitempty" yaml:"trust_boundaries_nested,omitempty"` +} + +func (what TrustBoundary) RecursivelyAllTechnicalAssetIDsInside(model *ParsedModel) []string { + result := make([]string, 0) + what.addAssetIDsRecursively(model, &result) + return result +} + +func (what TrustBoundary) IsTaggedWithAny(tags ...string) bool { + return containsCaseInsensitiveAny(what.Tags, tags...) +} + +func (what TrustBoundary) IsTaggedWithBaseTag(baseTag string) bool { + return IsTaggedWithBaseTag(what.Tags, baseTag) +} + +func (what TrustBoundary) IsTaggedWithAnyTraversingUp(model *ParsedModel, tags ...string) bool { + if what.IsTaggedWithAny(tags...) { + return true + } + parentID := what.ParentTrustBoundaryID(model) + if len(parentID) > 0 && model.TrustBoundaries[parentID].IsTaggedWithAnyTraversingUp(model, tags...) 
{ + return true + } + return false +} + +func (what TrustBoundary) ParentTrustBoundaryID(model *ParsedModel) string { + var result string + for _, candidate := range model.TrustBoundaries { + if contains(candidate.TrustBoundariesNested, what.Id) { + result = candidate.Id + return result + } + } + return result +} + +func (what TrustBoundary) HighestConfidentiality(model *ParsedModel) Confidentiality { + highest := Public + for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestConfidentiality(model) > highest { + highest = techAsset.HighestConfidentiality(model) + } + } + return highest +} + +func (what TrustBoundary) HighestIntegrity(model *ParsedModel) Criticality { + highest := Archive + for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestIntegrity(model) > highest { + highest = techAsset.HighestIntegrity(model) + } + } + return highest +} + +func (what TrustBoundary) HighestAvailability(model *ParsedModel) Criticality { + highest := Archive + for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) { + techAsset := model.TechnicalAssets[id] + if techAsset.HighestAvailability(model) > highest { + highest = techAsset.HighestAvailability(model) + } + } + return highest +} + +func (what TrustBoundary) AllParentTrustBoundaryIDs(model *ParsedModel) []string { + result := make([]string, 0) + what.addTrustBoundaryIDsRecursively(model, &result) + return result +} + +func (what TrustBoundary) addAssetIDsRecursively(model *ParsedModel, result *[]string) { + *result = append(*result, what.TechnicalAssetsInside...) 
+ for _, nestedBoundaryID := range what.TrustBoundariesNested { + model.TrustBoundaries[nestedBoundaryID].addAssetIDsRecursively(model, result) + } +} + +func (what TrustBoundary) addTrustBoundaryIDsRecursively(model *ParsedModel, result *[]string) { + *result = append(*result, what.Id) + parentID := what.ParentTrustBoundaryID(model) + if len(parentID) > 0 { + model.TrustBoundaries[parentID].addTrustBoundaryIDsRecursively(model, result) + } +} + +// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way: +func SortedKeysOfTrustBoundaries(model *ParsedModel) []string { + keys := make([]string, 0) + for k := range model.TrustBoundaries { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +type ByTrustBoundaryTitleSort []TrustBoundary + +func (what ByTrustBoundaryTitleSort) Len() int { return len(what) } +func (what ByTrustBoundaryTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] } +func (what ByTrustBoundaryTitleSort) Less(i, j int) bool { + return what[i].Title < what[j].Title +} diff --git a/pkg/security/types/trust_boundary_type.go b/pkg/security/types/trust_boundary_type.go new file mode 100644 index 00000000..9a9a005d --- /dev/null +++ b/pkg/security/types/trust_boundary_type.go @@ -0,0 +1,118 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type TrustBoundaryType int + +const ( + NetworkOnPrem TrustBoundaryType = iota + NetworkDedicatedHoster + NetworkVirtualLAN + NetworkCloudProvider + NetworkCloudSecurityGroup + NetworkPolicyNamespaceIsolation + ExecutionEnvironment +) + +func TrustBoundaryTypeValues() []TypeEnum { + return []TypeEnum{ + NetworkOnPrem, + NetworkDedicatedHoster, + NetworkVirtualLAN, + NetworkCloudProvider, + NetworkCloudSecurityGroup, + NetworkPolicyNamespaceIsolation, + ExecutionEnvironment, + } +} + +var TrustBoundaryTypeDescription = [...]TypeDescription{ + {"network-on-prem", 
"The whole network is on prem"}, + {"network-dedicated-hoster", "The network is at a dedicated hoster"}, + {"network-virtual-lan", "Network is a VLAN"}, + {"network-cloud-provider", "Network is at a cloud provider"}, + {"network-cloud-security-group", "Cloud rules controlling network traffic"}, + {"network-policy-namespace-isolation", "Segregation in a Kubernetes cluster"}, + {"execution-environment", "Logical group of items (not a protective network boundary in that sense). More like a namespace or another logical group of items"}, +} + +func ParseTrustBoundary(value string) (trustBoundary TrustBoundaryType, err error) { + value = strings.TrimSpace(value) + for _, candidate := range TrustBoundaryTypeValues() { + if candidate.String() == value { + return candidate.(TrustBoundaryType), err + } + } + return trustBoundary, errors.New("Unable to parse into type: " + value) +} + +func (what TrustBoundaryType) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + return TrustBoundaryTypeDescription[what].Name +} + +func (what TrustBoundaryType) Explain() string { + return TrustBoundaryTypeDescription[what].Description +} + +func (what TrustBoundaryType) IsNetworkBoundary() bool { + return what == NetworkOnPrem || what == NetworkDedicatedHoster || what == NetworkVirtualLAN || + what == NetworkCloudProvider || what == NetworkCloudSecurityGroup || what == NetworkPolicyNamespaceIsolation +} + +func (what TrustBoundaryType) IsWithinCloud() bool { + return what == NetworkCloudProvider || what == NetworkCloudSecurityGroup +} + +func (what TrustBoundaryType) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *TrustBoundaryType) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func 
(what TrustBoundaryType) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *TrustBoundaryType) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what TrustBoundaryType) find(value string) (TrustBoundaryType, error) { + for index, description := range TrustBoundaryTypeDescription { + if strings.EqualFold(value, description.Name) { + return TrustBoundaryType(index), nil + } + } + + return TrustBoundaryType(0), fmt.Errorf("unknown trust boundary type value %q", value) +} diff --git a/pkg/security/types/trust_boundary_type_test.go b/pkg/security/types/trust_boundary_type_test.go new file mode 100644 index 00000000..d93d00a1 --- /dev/null +++ b/pkg/security/types/trust_boundary_type_test.go @@ -0,0 +1,63 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseTrustBoundaryTest struct { + input string + expected TrustBoundaryType + expectedError error +} + +func TestParseTrustBoundaryType(t *testing.T) { + testCases := map[string]ParseTrustBoundaryTest{ + "network-on-prem": { + input: "network-on-prem", + expected: NetworkOnPrem, + }, + "network-dedicated-hoster": { + input: "network-dedicated-hoster", + expected: NetworkDedicatedHoster, + }, + "network-virtual-lan": { + input: "network-virtual-lan", + expected: NetworkVirtualLAN, + }, + "network-cloud-provider": { + input: "network-cloud-provider", + expected: NetworkCloudProvider, + }, + "network-cloud-security-group": { + input: "network-cloud-security-group", + expected: NetworkCloudSecurityGroup, + }, + "network-policy-namespace-isolation": { + input: "network-policy-namespace-isolation", + expected: NetworkPolicyNamespaceIsolation, + }, + "execution-environment": { + input: "execution-environment", + expected: ExecutionEnvironment, + }, + "unknown": { + input: "unknown", + 
expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseTrustBoundary(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/security/types/types.go b/pkg/security/types/types.go new file mode 100644 index 00000000..78b669a8 --- /dev/null +++ b/pkg/security/types/types.go @@ -0,0 +1,42 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +// TypeDescription contains a name for a type and its description +type TypeDescription struct { + Name string + Description string +} + +type TypeEnum interface { + String() string + Explain() string +} + +func GetBuiltinTypeValues() map[string][]TypeEnum { + return map[string][]TypeEnum{ + "Authentication": AuthenticationValues(), + "Authorization": AuthorizationValues(), + "Confidentiality": ConfidentialityValues(), + "Criticality (for integrity and availability)": CriticalityValues(), + "Data Breach Probability": DataBreachProbabilityValues(), + "Data Format": DataFormatValues(), + "Encryption": EncryptionStyleValues(), + "Protocol": ProtocolValues(), + "Quantity": QuantityValues(), + "Risk Exploitation Impact": RiskExploitationImpactValues(), + "Risk Exploitation Likelihood": RiskExploitationLikelihoodValues(), + "Risk Function": RiskFunctionValues(), + "Risk Severity": RiskSeverityValues(), + "Risk Status": RiskStatusValues(), + "STRIDE": STRIDEValues(), + "Technical Asset Machine": TechnicalAssetMachineValues(), + "Technical Asset Size": TechnicalAssetSizeValues(), + "Technical Asset Technology": TechnicalAssetTechnologyValues(), + "Technical Asset Type": TechnicalAssetTypeValues(), + "Trust Boundary Type": TrustBoundaryTypeValues(), + "Usage": UsageValues(), + } +} diff --git a/pkg/security/types/usage.go b/pkg/security/types/usage.go new file mode 100644 index 00000000..d0eee3d3 --- /dev/null +++ 
b/pkg/security/types/usage.go @@ -0,0 +1,99 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package types + +import ( + "encoding/json" + "errors" + "fmt" + "gopkg.in/yaml.v3" + "strings" +) + +type Usage int + +const ( + Business Usage = iota + DevOps +) + +func UsageValues() []TypeEnum { + return []TypeEnum{ + Business, + DevOps, + } +} + +func ParseUsage(value string) (usage Usage, err error) { + value = strings.TrimSpace(value) + for _, candidate := range UsageValues() { + if candidate.String() == value { + return candidate.(Usage), err + } + } + return usage, errors.New("Unable to parse into type: " + value) +} + +var UsageTypeDescription = [...]TypeDescription{ + {"business", "This system is operational and does business tasks"}, + {"devops", "This system is for development and/or deployment or other operational tasks"}, +} + +func (what Usage) String() string { + // NOTE: maintain list also in schema.json for validation in IDEs + //return [...]string{"business", "devops"}[what] + return UsageTypeDescription[what].Name +} + +func (what Usage) Explain() string { + return UsageTypeDescription[what].Description +} + +func (what Usage) Title() string { + return [...]string{"Business", "DevOps"}[what] +} + +func (what Usage) MarshalJSON() ([]byte, error) { + return json.Marshal(what.String()) +} + +func (what *Usage) UnmarshalJSON(data []byte) error { + var text string + unmarshalError := json.Unmarshal(data, &text) + if unmarshalError != nil { + return unmarshalError + } + + value, findError := what.find(text) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Usage) MarshalYAML() (interface{}, error) { + return what.String(), nil +} + +func (what *Usage) UnmarshalYAML(node *yaml.Node) error { + value, findError := what.find(node.Value) + if findError != nil { + return findError + } + + *what = value + return nil +} + +func (what Usage) find(value string) (Usage, error) { + for index, description := range UsageTypeDescription { 
+ if strings.EqualFold(value, description.Name) { + return Usage(index), nil + } + } + + return Usage(0), fmt.Errorf("unknown usage type value %q", value) +} diff --git a/pkg/security/types/usage_test.go b/pkg/security/types/usage_test.go new file mode 100644 index 00000000..fafb08f1 --- /dev/null +++ b/pkg/security/types/usage_test.go @@ -0,0 +1,44 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package types + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ParseUsageTest struct { + input string + expected Usage + expectedError error +} + +func TestParseUsage(t *testing.T) { + testCases := map[string]ParseUsageTest{ + "business": { + input: "business", + expected: Business, + }, + "devops": { + input: "devops", + expected: DevOps, + }, + "unknown": { + input: "unknown", + expectedError: errors.New("Unable to parse into type: unknown"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + actual, err := ParseUsage(testCase.input) + + assert.Equal(t, testCase.expected, actual) + assert.Equal(t, testCase.expectedError, err) + }) + } +} diff --git a/pkg/server/execute.go b/pkg/server/execute.go new file mode 100644 index 00000000..6fc34fd7 --- /dev/null +++ b/pkg/server/execute.go @@ -0,0 +1,229 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package server + +import ( + "errors" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/gin-gonic/gin" +) + +func (s *server) analyze(ginContext *gin.Context) { + s.execute(ginContext, false) +} + +func (s *server) check(ginContext *gin.Context) { + _, ok := s.execute(ginContext, true) + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model is ok", + }) + } +} + +func (s *server) execute(ginContext *gin.Context, dryRun bool) (yamlContent []byte, ok bool) { + defer func() { + var err error + if r := recover(); r != nil { + s.errorCount++ + err = r.(error) + log.Println(err) + 
ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": strings.TrimSpace(err.Error()), + }) + ok = false + } + }() + + dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI))) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + + fileUploaded, header, err := ginContext.Request.FormFile("file") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + + if header.Size > 50000000 { + msg := "maximum model upload file size exceeded (denial-of-service protection)" + log.Println(msg) + ginContext.JSON(http.StatusRequestEntityTooLarge, gin.H{ + "error": msg, + }) + return yamlContent, false + } + + filenameUploaded := strings.TrimSpace(header.Filename) + + tmpInputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-input-") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + defer func() { _ = os.RemoveAll(tmpInputDir) }() + + tmpModelFile, err := os.CreateTemp(tmpInputDir, "threagile-model-*") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + defer func() { _ = os.Remove(tmpModelFile.Name()) }() + _, err = io.Copy(tmpModelFile, fileUploaded) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + + yamlFile := tmpModelFile.Name() + + if strings.ToLower(filepath.Ext(filenameUploaded)) == ".zip" { + // unzip first (including the resources like images etc.) 
+ if s.config.Verbose { + fmt.Println("Decompressing uploaded archive") + } + filenamesUnzipped, err := unzip(tmpModelFile.Name(), tmpInputDir) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + found := false + for _, name := range filenamesUnzipped { + if strings.ToLower(filepath.Ext(name)) == ".yaml" { + yamlFile = name + found = true + break + } + } + if !found { + panic(errors.New("no yaml file found in uploaded archive")) + } + } + + tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-output-") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + defer func() { _ = os.RemoveAll(tmpOutputDir) }() + + tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-result-*.zip") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + defer func() { _ = os.Remove(tmpResultFile.Name()) }() + + if dryRun { + s.doItViaRuntimeCall(yamlFile, tmpOutputDir, false, false, false, false, false, true, true, true, 40) + } else { + s.doItViaRuntimeCall(yamlFile, tmpOutputDir, true, true, true, true, true, true, true, true, dpi) + } + + yamlContent, err = os.ReadFile(filepath.Clean(yamlFile)) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + err = os.WriteFile(filepath.Join(tmpOutputDir, s.config.InputFile), yamlContent, 0400) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + + if !dryRun { + files := []string{ + filepath.Join(tmpOutputDir, s.config.InputFile), + filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG), + filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG), + filepath.Join(tmpOutputDir, s.config.ReportFilename), + filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename), + filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename), + filepath.Join(tmpOutputDir, s.config.JsonRisksFilename), + 
filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename), + filepath.Join(tmpOutputDir, s.config.JsonStatsFilename), + } + if s.config.KeepDiagramSourceFiles { + files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG)) + files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenameDOT)) + } + err = zipFiles(tmpResultFile.Name(), files) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return yamlContent, false + } + if s.config.Verbose { + log.Println("Streaming back result file: " + tmpResultFile.Name()) + } + ginContext.FileAttachment(tmpResultFile.Name(), "threagile-result.zip") + } + s.successCount++ + return yamlContent, true +} + +// ultimately to avoid any in-process memory and/or data leaks by the used third party libs like PDF generation: exec and quit +func (s *server) doItViaRuntimeCall(modelFile string, outputDir string, + generateDataFlowDiagram, generateDataAssetDiagram, generateReportPdf, generateRisksExcel, generateTagsExcel, generateRisksJSON, generateTechnicalAssetsJSON, generateStatsJSON bool, + dpi int) { + // Remember to also add the same args to the exec based sub-process calls! + var cmd *exec.Cmd + args := []string{"-model", modelFile, "-output", outputDir, "-execute-model-macro", s.config.ExecuteModelMacro, "-raa-run", s.config.RAAPlugin, "-custom-risk-rules-plugins", strings.Join(s.config.RiskRulesPlugins, ","), "-skip-risk-rules", s.config.SkipRiskRules, "-diagram-dpi", strconv.Itoa(dpi)} + if s.config.Verbose { + args = append(args, "-verbose") + } + if s.config.IgnoreOrphanedRiskTracking { // TODO why add all them as arguments, when they are also variables on outer level? 
+ args = append(args, "-ignore-orphaned-risk-tracking") + } + if generateDataFlowDiagram { + args = append(args, "-generate-data-flow-diagram") + } + if generateDataAssetDiagram { + args = append(args, "-generate-data-asset-diagram") + } + if generateReportPdf { + args = append(args, "-generate-report-pdf") + } + if generateRisksExcel { + args = append(args, "-generate-risks-excel") + } + if generateTagsExcel { + args = append(args, "-generate-tags-excel") + } + if generateRisksJSON { + args = append(args, "-generate-risks-json") + } + if generateTechnicalAssetsJSON { + args = append(args, "-generate-technical-assets-json") + } + if generateStatsJSON { + args = append(args, "-generate-stats-json") + } + self, nameError := os.Executable() + if nameError != nil { + panic(nameError) + } + + cmd = exec.Command(self, args...) // #nosec G204 + out, err := cmd.CombinedOutput() + if err != nil { + panic(errors.New(string(out))) + } else { + if s.config.Verbose && len(out) > 0 { + fmt.Println("---") + fmt.Print(string(out)) + fmt.Println("---") + } + } +} diff --git a/pkg/server/hash.go b/pkg/server/hash.go new file mode 100644 index 00000000..89072380 --- /dev/null +++ b/pkg/server/hash.go @@ -0,0 +1,35 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package server + +import ( + "crypto/sha512" + "encoding/hex" + "errors" + "fmt" + "hash/fnv" +) + +func xor(key []byte, xor []byte) []byte { + if len(key) != len(xor) { + panic(errors.New("key length not matching XOR length")) + } + result := make([]byte, len(xor)) + for i, b := range key { + result[i] = b ^ xor[i] + } + return result +} + +func hashSHA256(key []byte) string { + hasher := sha512.New() + hasher.Write(key) + return hex.EncodeToString(hasher.Sum(nil)) +} + +func hash(s string) string { + h := fnv.New32a() + _, _ = h.Write([]byte(s)) + return fmt.Sprintf("%v", h.Sum32()) +} diff --git a/pkg/server/model.go b/pkg/server/model.go new file mode 100644 index 00000000..af8a2a23 --- /dev/null +++ b/pkg/server/model.go @@ -0,0 
+1,1374 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package server + +import ( + "bytes" + "compress/gzip" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "gopkg.in/yaml.v3" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/input" + "github.com/threagile/threagile/pkg/security/types" + "golang.org/x/crypto/argon2" +) + +// creates a sub-folder (named by a new UUID) inside the token folder +func (s *server) createNewModel(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + ok = s.checkObjectCreationThrottler(ginContext, "MODEL") + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + + aUuid := uuid.New().String() + err := os.Mkdir(folderNameForModel(folderNameOfKey, aUuid), 0700) + if err != nil { + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to create model", + }) + return + } + + aYaml := `title: New Threat Model +threagile_version: ` + docs.ThreagileVersion + ` +author: + name: "" + homepage: "" +date: +business_overview: + description: "" + images: [] +technical_overview: + description: "" + images: [] +business_criticality: "" +management_summary_comment: "" +questions: {} +abuse_cases: {} +security_requirements: {} +tags_available: [] +data_assets: {} +technical_assets: {} +trust_boundaries: {} +shared_runtimes: {} +individual_risk_categories: {} +risk_tracking: {} +diagram_tweak_nodesep: "" +diagram_tweak_ranksep: "" +diagram_tweak_edge_layout: "" +diagram_tweak_suppress_edge_labels: false +diagram_tweak_invisible_connections_between_assets: [] +diagram_tweak_same_rank_assets: []` + + ok = s.writeModelYAML(ginContext, aYaml, key, folderNameForModel(folderNameOfKey, aUuid), "New Model Creation", true) + if ok { + 
ginContext.JSON(http.StatusCreated, gin.H{ + "message": "model created", + "id": aUuid, + }) + } +} + +type payloadModels struct { + ID string `yaml:"id" json:"id"` + Title string `yaml:"title" json:"title"` + TimestampCreated time.Time `yaml:"timestamp_created" json:"timestamp_created"` + TimestampModified time.Time `yaml:"timestamp_modified" json:"timestamp_modified"` +} + +func (s *server) listModels(ginContext *gin.Context) { // TODO currently returns error when any model is no longer valid in syntax, so eventually have some fallback to not just bark on an invalid model... + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + + result := make([]payloadModels, 0) + modelFolders, err := os.ReadDir(folderNameOfKey) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return + } + for _, dirEntry := range modelFolders { + if dirEntry.IsDir() { + modelStat, err := os.Stat(filepath.Join(folderNameOfKey, dirEntry.Name(), s.config.InputFile)) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "unable to list model", + }) + return + } + aModel, _, ok := s.readModel(ginContext, dirEntry.Name(), key, folderNameOfKey) + if !ok { + return + } + fileInfo, err := dirEntry.Info() + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "unable to get file info", + }) + return + } + result = append(result, payloadModels{ + ID: dirEntry.Name(), + Title: aModel.Title, + TimestampCreated: fileInfo.ModTime(), + TimestampModified: modelStat.ModTime(), + }) + } + } + ginContext.JSON(http.StatusOK, result) +} + +func (s *server) deleteModel(ginContext *gin.Context) { + folderNameOfKey, _, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + 
folder, ok := s.checkModelFolder(ginContext, ginContext.Param("model-id"), folderNameOfKey) + if ok { + if folder != filepath.Clean(folder) { + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "model-id is weird", + }) + return + } + err := os.RemoveAll(folder) + if err != nil { + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "model not found", + }) + return + } + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model deleted", + }) + } +} + +type payloadCover struct { + Title string `yaml:"title" json:"title"` + Date time.Time `yaml:"date" json:"date"` + Author input.Author `yaml:"author" json:"author"` +} + +func (s *server) setCover(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadCover{} + err := ginContext.BindJSON(&payload) + if err != nil { + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + modelInput.Title = payload.Title + if !payload.Date.IsZero() { + modelInput.Date = payload.Date.Format("2006-01-02") + } + modelInput.Author.Name = payload.Author.Name + modelInput.Author.Homepage = payload.Author.Homepage + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Cover Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model updated", + }) + } + } +} + +func (s *server) getCover(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "title": aModel.Title, + "date": aModel.Date, + "author": 
aModel.Author, + }) + } +} + +type payloadOverview struct { + ManagementSummaryComment string `yaml:"management_summary_comment" json:"management_summary_comment"` + BusinessCriticality string `yaml:"business_criticality" json:"business_criticality"` + BusinessOverview input.Overview `yaml:"business_overview" json:"business_overview"` + TechnicalOverview input.Overview `yaml:"technical_overview" json:"technical_overview"` +} + +func (s *server) setOverview(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadOverview{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + criticality, err := types.ParseCriticality(payload.BusinessCriticality) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + modelInput.ManagementSummaryComment = payload.ManagementSummaryComment + modelInput.BusinessCriticality = criticality.String() + modelInput.BusinessOverview.Description = payload.BusinessOverview.Description + modelInput.BusinessOverview.Images = payload.BusinessOverview.Images + modelInput.TechnicalOverview.Description = payload.TechnicalOverview.Description + modelInput.TechnicalOverview.Images = payload.TechnicalOverview.Images + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Overview Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model updated", + }) + } + } +} + +func (s *server) getOverview(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := 
s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "management_summary_comment": aModel.ManagementSummaryComment, + "business_criticality": aModel.BusinessCriticality, + "business_overview": aModel.BusinessOverview, + "technical_overview": aModel.TechnicalOverview, + }) + } +} + +type payloadAbuseCases map[string]string + +func (s *server) setAbuseCases(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadAbuseCases{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + modelInput.AbuseCases = payload + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Abuse Cases Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model updated", + }) + } + } +} + +func (s *server) getAbuseCases(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, aModel.AbuseCases) + } +} + +type payloadSecurityRequirements map[string]string + +func (s *server) setSecurityRequirements(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadSecurityRequirements{} 
+ err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + modelInput.SecurityRequirements = payload + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Security Requirements Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "model updated", + }) + } + } +} + +func (s *server) getSecurityRequirements(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, aModel.SecurityRequirements) + } +} + +type payloadDataAsset struct { + Title string `yaml:"title" json:"title"` + Id string `yaml:"id" json:"id"` + Description string `yaml:"description" json:"description"` + Usage string `yaml:"usage" json:"usage"` + Tags []string `yaml:"tags" json:"tags"` + Origin string `yaml:"origin" json:"origin"` + Owner string `yaml:"owner" json:"owner"` + Quantity string `yaml:"quantity" json:"quantity"` + Confidentiality string `yaml:"confidentiality" json:"confidentiality"` + Integrity string `yaml:"integrity" json:"integrity"` + Availability string `yaml:"availability" json:"availability"` + JustificationCiaRating string `yaml:"justification_cia_rating" json:"justification_cia_rating"` +} + +func (s *server) getDataAssets(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, aModel.DataAssets) + } +} + +func (s *server) getDataAsset(ginContext *gin.Context) { + folderNameOfKey, key, 
ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + // yes, here keyed by title in YAML for better readability in the YAML file itself + for title, dataAsset := range modelInput.DataAssets { + if dataAsset.ID == ginContext.Param("data-asset-id") { + ginContext.JSON(http.StatusOK, gin.H{ + title: dataAsset, + }) + return + } + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "data asset not found", + }) + } +} + +func (s *server) deleteDataAsset(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + referencesDeleted := false + // yes, here keyed by title in YAML for better readability in the YAML file itself + for title, dataAsset := range modelInput.DataAssets { + if dataAsset.ID == ginContext.Param("data-asset-id") { + // also remove all usages of this data asset !! + for _, techAsset := range modelInput.TechnicalAssets { + if techAsset.DataAssetsProcessed != nil { + for i, parsedChangeCandidateAsset := range techAsset.DataAssetsProcessed { + referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) + if referencedAsset == dataAsset.ID { // apply the removal + referencesDeleted = true + // Remove the element at index i + // TODO needs more testing + copy(techAsset.DataAssetsProcessed[i:], techAsset.DataAssetsProcessed[i+1:]) // Shift a[i+1:] left one index. + techAsset.DataAssetsProcessed[len(techAsset.DataAssetsProcessed)-1] = "" // Erase last element (write zero value). + techAsset.DataAssetsProcessed = techAsset.DataAssetsProcessed[:len(techAsset.DataAssetsProcessed)-1] // Truncate slice. 
+ } + } + } + if techAsset.DataAssetsStored != nil { + for i, parsedChangeCandidateAsset := range techAsset.DataAssetsStored { + referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) + if referencedAsset == dataAsset.ID { // apply the removal + referencesDeleted = true + // Remove the element at index i + // TODO needs more testing + copy(techAsset.DataAssetsStored[i:], techAsset.DataAssetsStored[i+1:]) // Shift a[i+1:] left one index. + techAsset.DataAssetsStored[len(techAsset.DataAssetsStored)-1] = "" // Erase last element (write zero value). + techAsset.DataAssetsStored = techAsset.DataAssetsStored[:len(techAsset.DataAssetsStored)-1] // Truncate slice. + } + } + } + if techAsset.CommunicationLinks != nil { + for title, commLink := range techAsset.CommunicationLinks { + for i, dataAssetSent := range commLink.DataAssetsSent { + referencedAsset := fmt.Sprintf("%v", dataAssetSent) + if referencedAsset == dataAsset.ID { // apply the removal + referencesDeleted = true + // Remove the element at index i + // TODO needs more testing + copy(techAsset.CommunicationLinks[title].DataAssetsSent[i:], techAsset.CommunicationLinks[title].DataAssetsSent[i+1:]) // Shift a[i+1:] left one index. + techAsset.CommunicationLinks[title].DataAssetsSent[len(techAsset.CommunicationLinks[title].DataAssetsSent)-1] = "" // Erase last element (write zero value). + x := techAsset.CommunicationLinks[title] + x.DataAssetsSent = techAsset.CommunicationLinks[title].DataAssetsSent[:len(techAsset.CommunicationLinks[title].DataAssetsSent)-1] // Truncate slice. 
+ techAsset.CommunicationLinks[title] = x + } + } + for i, dataAssetReceived := range commLink.DataAssetsReceived { + referencedAsset := fmt.Sprintf("%v", dataAssetReceived) + if referencedAsset == dataAsset.ID { // apply the removal + referencesDeleted = true + // Remove the element at index i + // TODO needs more testing + copy(techAsset.CommunicationLinks[title].DataAssetsReceived[i:], techAsset.CommunicationLinks[title].DataAssetsReceived[i+1:]) // Shift a[i+1:] left one index. + techAsset.CommunicationLinks[title].DataAssetsReceived[len(techAsset.CommunicationLinks[title].DataAssetsReceived)-1] = "" // Erase last element (write zero value). + x := techAsset.CommunicationLinks[title] + x.DataAssetsReceived = techAsset.CommunicationLinks[title].DataAssetsReceived[:len(techAsset.CommunicationLinks[title].DataAssetsReceived)-1] // Truncate slice. + techAsset.CommunicationLinks[title] = x + } + } + } + } + } + for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories { + if individualRiskCat.RisksIdentified != nil { + for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified { + if individualRiskInstance.MostRelevantDataAsset == dataAsset.ID { // apply the removal + referencesDeleted = true + x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] + x.MostRelevantDataAsset = "" // TODO needs more testing + modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x + } + } + } + } + // remove it itself + delete(modelInput.DataAssets, title) + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Data Asset Deletion") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "data asset deleted", + "id": dataAsset.ID, + "references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well + }) + } + return + } + } + 
ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "data asset not found", + }) + } +} + +func (s *server) setDataAsset(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + // yes, here keyed by title in YAML for better readability in the YAML file itself + for title, dataAsset := range modelInput.DataAssets { + if dataAsset.ID == ginContext.Param("data-asset-id") { + payload := payloadDataAsset{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + dataAssetInput, ok := s.populateDataAsset(ginContext, payload) + if !ok { + return + } + // in order to also update the title, remove the asset from the map and re-insert it (with new key) + delete(modelInput.DataAssets, title) + modelInput.DataAssets[payload.Title] = dataAssetInput + idChanged := dataAssetInput.ID != dataAsset.ID + if idChanged { // ID-CHANGE-PROPAGATION + // also update all usages to point to the new (changed) ID !! 
+ for techAssetTitle, techAsset := range modelInput.TechnicalAssets { + if techAsset.DataAssetsProcessed != nil { + for i, parsedChangeCandidateAsset := range techAsset.DataAssetsProcessed { + referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) + if referencedAsset == dataAsset.ID { // apply the ID change + modelInput.TechnicalAssets[techAssetTitle].DataAssetsProcessed[i] = dataAssetInput.ID + } + } + } + if techAsset.DataAssetsStored != nil { + for i, parsedChangeCandidateAsset := range techAsset.DataAssetsStored { + referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset) + if referencedAsset == dataAsset.ID { // apply the ID change + modelInput.TechnicalAssets[techAssetTitle].DataAssetsStored[i] = dataAssetInput.ID + } + } + } + if techAsset.CommunicationLinks != nil { + for title, commLink := range techAsset.CommunicationLinks { + for i, dataAssetSent := range commLink.DataAssetsSent { + referencedAsset := fmt.Sprintf("%v", dataAssetSent) + if referencedAsset == dataAsset.ID { // apply the ID change + modelInput.TechnicalAssets[techAssetTitle].CommunicationLinks[title].DataAssetsSent[i] = dataAssetInput.ID + } + } + for i, dataAssetReceived := range commLink.DataAssetsReceived { + referencedAsset := fmt.Sprintf("%v", dataAssetReceived) + if referencedAsset == dataAsset.ID { // apply the ID change + modelInput.TechnicalAssets[techAssetTitle].CommunicationLinks[title].DataAssetsReceived[i] = dataAssetInput.ID + } + } + } + } + } + for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories { + if individualRiskCat.RisksIdentified != nil { + for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified { + if individualRiskInstance.MostRelevantDataAsset == dataAsset.ID { // apply the ID change + x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] + x.MostRelevantDataAsset = dataAssetInput.ID // TODO needs more testing + 
modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x + } + } + } + } + } + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Data Asset Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "data asset updated", + "id": dataAssetInput.ID, + "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded + }) + } + return + } + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "data asset not found", + }) + } +} + +func (s *server) createNewDataAsset(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadDataAsset{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + // yes, here keyed by title in YAML for better readability in the YAML file itself + if _, exists := modelInput.DataAssets[payload.Title]; exists { + ginContext.JSON(http.StatusConflict, gin.H{ + "error": "data asset with this title already exists", + }) + return + } + // but later it will in memory keyed by its "id", so do this uniqueness check also + for _, asset := range modelInput.DataAssets { + if asset.ID == payload.Id { + ginContext.JSON(http.StatusConflict, gin.H{ + "error": "data asset with this id already exists", + }) + return + } + } + dataAssetInput, ok := s.populateDataAsset(ginContext, payload) + if !ok { + return + } + if modelInput.DataAssets == nil { + modelInput.DataAssets = make(map[string]input.DataAsset) + } + modelInput.DataAssets[payload.Title] = dataAssetInput + ok = s.writeModel(ginContext, key, folderNameOfKey, 
&modelInput, "Data Asset Creation") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "data asset created", + "id": dataAssetInput.ID, + }) + } + } +} + +func (s *server) populateDataAsset(ginContext *gin.Context, payload payloadDataAsset) (dataAssetInput input.DataAsset, ok bool) { + usage, err := types.ParseUsage(payload.Usage) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return dataAssetInput, false + } + quantity, err := types.ParseQuantity(payload.Quantity) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return dataAssetInput, false + } + confidentiality, err := types.ParseConfidentiality(payload.Confidentiality) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return dataAssetInput, false + } + integrity, err := types.ParseCriticality(payload.Integrity) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return dataAssetInput, false + } + availability, err := types.ParseCriticality(payload.Availability) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return dataAssetInput, false + } + dataAssetInput = input.DataAsset{ + ID: payload.Id, + Description: payload.Description, + Usage: usage.String(), + Tags: lowerCaseAndTrim(payload.Tags), + Origin: payload.Origin, + Owner: payload.Owner, + Quantity: quantity.String(), + Confidentiality: confidentiality.String(), + Integrity: integrity.String(), + Availability: availability.String(), + JustificationCiaRating: payload.JustificationCiaRating, + } + return dataAssetInput, true +} + +func (s *server) getTrustBoundaries(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + ginContext.JSON(http.StatusOK, aModel.TrustBoundaries) + } +} + +type payloadSharedRuntime struct { + Title string 
`yaml:"title" json:"title"` + Id string `yaml:"id" json:"id"` + Description string `yaml:"description" json:"description"` + Tags []string `yaml:"tags" json:"tags"` + TechnicalAssetsRunning []string `yaml:"technical_assets_running" json:"technical_assets_running"` +} + +func (s *server) setSharedRuntime(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + // yes, here keyed by title in YAML for better readability in the YAML file itself + for title, sharedRuntime := range modelInput.SharedRuntimes { + if sharedRuntime.ID == ginContext.Param("shared-runtime-id") { + payload := payloadSharedRuntime{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + sharedRuntimeInput, ok := populateSharedRuntime(ginContext, payload) + if !ok { + return + } + // in order to also update the title, remove the shared runtime from the map and re-insert it (with new key) + delete(modelInput.SharedRuntimes, title) + modelInput.SharedRuntimes[payload.Title] = sharedRuntimeInput + idChanged := sharedRuntimeInput.ID != sharedRuntime.ID + if idChanged { // ID-CHANGE-PROPAGATION + for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories { + if individualRiskCat.RisksIdentified != nil { + for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified { + if individualRiskInstance.MostRelevantSharedRuntime == sharedRuntime.ID { // apply the ID change + x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] + x.MostRelevantSharedRuntime = sharedRuntimeInput.ID // TODO needs more testing + 
modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x + } + } + } + } + } + ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Update") + if ok { + ginContext.JSON(http.StatusOK, gin.H{ + "message": "shared runtime updated", + "id": sharedRuntimeInput.ID, + "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded + }) + } + return + } + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "shared runtime not found", + }) + } +} + +func (s *server) getSharedRuntime(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + // yes, here keyed by title in YAML for better readability in the YAML file itself + for title, sharedRuntime := range modelInput.SharedRuntimes { + if sharedRuntime.ID == ginContext.Param("shared-runtime-id") { + ginContext.JSON(http.StatusOK, gin.H{ + title: sharedRuntime, + }) + return + } + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "shared runtime not found", + }) + } +} + +func (s *server) createNewSharedRuntime(ginContext *gin.Context) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer s.unlockFolder(folderNameOfKey) + modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if ok { + payload := payloadSharedRuntime{} + err := ginContext.BindJSON(&payload) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": "unable to parse request payload", + }) + return + } + // yes, here keyed by title in YAML for better readability in the YAML file itself + if _, 
exists := modelInput.SharedRuntimes[payload.Title]; exists {
			ginContext.JSON(http.StatusConflict, gin.H{
				"error": "shared runtime with this title already exists",
			})
			return
		}
		// but later it will in memory keyed by its "id", so do this uniqueness check also
		for _, sharedRuntime := range modelInput.SharedRuntimes {
			if sharedRuntime.ID == payload.Id {
				ginContext.JSON(http.StatusConflict, gin.H{
					"error": "shared runtime with this id already exists",
				})
				return
			}
		}
		if !checkTechnicalAssetsExisting(modelInput, payload.TechnicalAssetsRunning) {
			ginContext.JSON(http.StatusBadRequest, gin.H{
				"error": "referenced technical asset does not exist",
			})
			return
		}
		sharedRuntimeInput, ok := populateSharedRuntime(ginContext, payload)
		if !ok {
			return
		}
		// lazily create the map on first insert into a fresh model
		if modelInput.SharedRuntimes == nil {
			modelInput.SharedRuntimes = make(map[string]input.SharedRuntime)
		}
		modelInput.SharedRuntimes[payload.Title] = sharedRuntimeInput
		ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Creation")
		if ok {
			ginContext.JSON(http.StatusOK, gin.H{
				"message": "shared runtime created",
				"id":      sharedRuntimeInput.ID,
			})
		}
	}
}

// checkTechnicalAssetsExisting reports whether every ID in techAssetIDs
// matches a technical asset in the model (O(assets * ids) linear scan).
func checkTechnicalAssetsExisting(modelInput input.Model, techAssetIDs []string) (ok bool) {
	for _, techAssetID := range techAssetIDs {
		exists := false
		for _, val := range modelInput.TechnicalAssets {
			if val.ID == techAssetID {
				exists = true
				break
			}
		}
		if !exists {
			return false
		}
	}
	return true
}

// populateSharedRuntime maps the request payload onto an input.SharedRuntime.
// NOTE: lowerCaseAndTrim mutates payload.Tags in place. The *gin.Context
// parameter is currently unused; ok is always true (kept for signature
// symmetry with other populate* helpers).
func populateSharedRuntime(_ *gin.Context, payload payloadSharedRuntime) (sharedRuntimeInput input.SharedRuntime, ok bool) {
	sharedRuntimeInput = input.SharedRuntime{
		ID:                     payload.Id,
		Description:            payload.Description,
		Tags:                   lowerCaseAndTrim(payload.Tags),
		TechnicalAssetsRunning: payload.TechnicalAssetsRunning,
	}
	return sharedRuntimeInput, true
}

// deleteSharedRuntime removes the shared runtime addressed by the
// "shared-runtime-id" URL parameter and blanks MostRelevantSharedRuntime on
// every individual risk instance that referenced it.
func (s *server) deleteSharedRuntime(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)
	modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if ok {
		referencesDeleted := false
		// yes, here keyed by title in YAML for better readability in the YAML file itself
		for title, sharedRuntime := range modelInput.SharedRuntimes {
			if sharedRuntime.ID == ginContext.Param("shared-runtime-id") {
				// also remove all usages of this shared runtime !!
				for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories {
					if individualRiskCat.RisksIdentified != nil {
						for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified {
							if individualRiskInstance.MostRelevantSharedRuntime == sharedRuntime.ID { // apply the removal
								referencesDeleted = true
								x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle]
								x.MostRelevantSharedRuntime = "" // TODO needs more testing
								modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x
							}
						}
					}
				}
				// remove it itself
				delete(modelInput.SharedRuntimes, title)
				ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Deletion")
				if ok {
					ginContext.JSON(http.StatusOK, gin.H{
						"message":            "shared runtime deleted",
						"id":                 sharedRuntime.ID,
						"references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well
					})
				}
				return
			}
		}
		ginContext.JSON(http.StatusNotFound, gin.H{
			"error": "shared runtime not found",
		})
	}
}

// getSharedRuntimes returns the full shared-runtimes map of the model.
func (s *server) getSharedRuntimes(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)
	aModel, _, ok :=
s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if ok {
		ginContext.JSON(http.StatusOK, aModel.SharedRuntimes)
	}
}

// readModel loads, decrypts (AES-GCM with an Argon2id-derived key),
// decompresses (gzip) and YAML-parses the model stored for the given UUID
// under the key's folder. On any failure it writes an HTTP error response and
// returns ok=false. Returns the parsed model and the raw YAML text.
func (s *server) readModel(ginContext *gin.Context, modelUUID string, key []byte, folderNameOfKey string) (modelInputResult input.Model, yamlText string, ok bool) {
	modelFolder, ok := s.checkModelFolder(ginContext, modelUUID, folderNameOfKey)
	if !ok {
		return modelInputResult, yamlText, false
	}
	cryptoKey := generateKeyFromAlreadyStrongRandomInput(key)
	block, err := aes.NewCipher(cryptoKey)
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}
	aesGcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}

	fileBytes, err := os.ReadFile(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile)))
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}

	// FIX: guard against a truncated/corrupt model file — slicing
	// fileBytes[0:12] below would panic if fewer than 12 bytes (the GCM
	// nonce length written by writeModelYAML) are present.
	if len(fileBytes) < 12 {
		log.Println("model file too short to contain nonce")
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}

	// layout on disk: 12-byte nonce followed by the GCM ciphertext
	nonce := fileBytes[0:12]
	ciphertext := fileBytes[12:]
	plaintext, err := aesGcm.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}

	r, err := gzip.NewReader(bytes.NewReader(plaintext))
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}
	buf := new(bytes.Buffer)
	_, _ = buf.ReadFrom(r)
	modelInput := new(input.Model).Defaults()
	yamlBytes := buf.Bytes()
	err = yaml.Unmarshal(yamlBytes, &modelInput)
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to open model",
		})
		return modelInputResult, yamlText, false
	}
	return *modelInput, string(yamlBytes), true
}

// writeModel serializes the model to YAML (stamping the current
// ThreagileVersion) and persists it encrypted via writeModelYAML, recording
// changeReasonForHistory in the backup history.
func (s *server) writeModel(ginContext *gin.Context, key []byte, folderNameOfKey string, modelInput *input.Model, changeReasonForHistory string) (ok bool) {
	modelFolder, ok := s.checkModelFolder(ginContext, ginContext.Param("model-id"), folderNameOfKey)
	if ok {
		modelInput.ThreagileVersion = docs.ThreagileVersion
		yamlBytes, err := yaml.Marshal(modelInput)
		if err != nil {
			log.Println(err)
			ginContext.JSON(http.StatusInternalServerError, gin.H{
				"error": "unable to write model",
			})
			return false
		}
		/*
			yamlBytes = model.ReformatYAML(yamlBytes)
		*/
		return s.writeModelYAML(ginContext, string(yamlBytes), key, modelFolder, changeReasonForHistory, false)
	}
	return false
}

// checkModelFolder validates modelUUID syntactically and checks the model's
// folder exists on disk; responds 404 on either failure.
func (s *server) checkModelFolder(ginContext *gin.Context, modelUUID string, folderNameOfKey string) (modelFolder string, ok bool) {
	uuidParsed, err := uuid.Parse(modelUUID)
	if err != nil {
		ginContext.JSON(http.StatusNotFound, gin.H{
			"error": "model not found",
		})
		return modelFolder, false
	}
	modelFolder = folderNameForModel(folderNameOfKey, uuidParsed.String())
	if _, err := os.Stat(modelFolder); os.IsNotExist(err) {
		ginContext.JSON(http.StatusNotFound, gin.H{
			"error": "model not found",
		})
		return modelFolder, false
	}
	return modelFolder, true
}

// getModel streams the decrypted model YAML back to the caller as a file
// attachment, via a temporary file.
func (s *server) getModel(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)
	_, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if ok {
		tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-*.yaml")
		if err != nil {
			handleErrorInServiceCall(err, ginContext)
			return
		}
		err =
os.WriteFile(tmpResultFile.Name(), []byte(yamlText), 0400)
		// FIX: register the cleanup BEFORE the error check — the original
		// returned on a WriteFile error without ever removing the temp file.
		defer func() { _ = os.Remove(tmpResultFile.Name()) }()
		if err != nil {
			log.Println(err)
			ginContext.JSON(http.StatusInternalServerError, gin.H{
				"error": "unable to stream model file",
			})
			return
		}
		ginContext.FileAttachment(tmpResultFile.Name(), s.config.InputFile)
	}
}

// importModel fully replaces threagile.yaml in the sub-folder given by UUID.
// The uploaded model is first run through the full risk analysis (result
// discarded) to validate it before it overwrites the stored model.
func (s *server) importModel(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)

	aUuid := ginContext.Param("model-id") // UUID is syntactically validated in readModel+checkModelFolder (next line) via uuid.Parse(modelUUID)
	_, _, ok = s.readModel(ginContext, aUuid, key, folderNameOfKey)
	if ok {
		// first analyze it simply by executing the full risk process (just discard the result) to ensure that everything would work
		yamlContent, ok := s.execute(ginContext, true)
		if ok {
			// if we're here, then no problem was raised, so ok to proceed
			ok = s.writeModelYAML(ginContext, string(yamlContent), key, folderNameForModel(folderNameOfKey, aUuid), "Model Import", false)
			if ok {
				ginContext.JSON(http.StatusCreated, gin.H{
					"message": "model imported",
				})
			}
		}
	}
}

// analyzeModelOnServerDirectly runs the full analysis for a stored model in
// temp folders and streams back a ZIP with all generated artifacts. A panic
// raised during analysis is recovered in the deferred handler and reported as
// HTTP 400.
func (s *server) analyzeModelOnServerDirectly(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer func() {
		s.unlockFolder(folderNameOfKey)
		var err error
		if r := recover(); r != nil {
			err = r.(error)
			if s.config.Verbose {
				log.Println(err)
			}
			log.Println(err)
			ginContext.JSON(http.StatusBadRequest, gin.H{
				"error": strings.TrimSpace(err.Error()),
			})
			ok = false
		}
	}()

	dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI)))
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}

	_, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if !ok {
		return
	}
	tmpModelFile, err := os.CreateTemp(s.config.TempFolder, "threagile-direct-analyze-*")
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}
	defer func() { _ = os.Remove(tmpModelFile.Name()) }()
	tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-direct-analyze-")
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}
	defer func() { _ = os.RemoveAll(tmpOutputDir) }()
	tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-result-*.zip")
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}
	defer func() { _ = os.Remove(tmpResultFile.Name()) }()

	// FIX: check the WriteFile error BEFORE invoking the analysis run.
	// The original assigned err here but only tested it after
	// doItViaRuntimeCall had already executed against a possibly
	// missing/partial model file.
	err = os.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400)
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}

	s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, true, true, true, true, true, true, true, true, dpi)
	err = os.WriteFile(filepath.Join(tmpOutputDir, s.config.InputFile), []byte(yamlText), 0400)
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}

	files := []string{
		filepath.Join(tmpOutputDir, s.config.InputFile),
		filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG),
		filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG),
		filepath.Join(tmpOutputDir, s.config.ReportFilename),
		filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename),
		filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename),
		filepath.Join(tmpOutputDir, s.config.JsonRisksFilename),
		filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename),
		filepath.Join(tmpOutputDir, s.config.JsonStatsFilename),
	}
	if s.config.KeepDiagramSourceFiles {
		files = append(files, filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenameDOT))
		files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenameDOT))
	}
	err = zipFiles(tmpResultFile.Name(), files)
	if err != nil {
		handleErrorInServiceCall(err, ginContext)
		return
	}
	if s.config.Verbose {
		fmt.Println("Streaming back result file: " + tmpResultFile.Name())
	}
	ginContext.FileAttachment(tmpResultFile.Name(), "threagile-result.zip")
}

// writeModelYAML gzips the YAML text, encrypts it with AES-GCM under an
// Argon2id-derived key, optionally snapshots the previous file to the history
// folder, and writes nonce+ciphertext to the model's input file.
func (s *server) writeModelYAML(ginContext *gin.Context, yaml string, key []byte, modelFolder string, changeReasonForHistory string, skipBackup bool) (ok bool) {
	if s.config.Verbose {
		fmt.Println("about to write " + strconv.Itoa(len(yaml)) + " bytes of yaml into model folder: " + modelFolder)
	}
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	_, _ = w.Write([]byte(yaml))
	_ = w.Close()
	plaintext := b.Bytes()
	cryptoKey := generateKeyFromAlreadyStrongRandomInput(key)
	block, err := aes.NewCipher(cryptoKey)
	if err != nil {
		log.Println(err)
		ginContext.JSON(http.StatusInternalServerError, gin.H{
			"error": "unable to write model",
		})
		return false
	}
	// Never use more than 2^32 random nonces with a given key because of the risk of a repeat.
+ nonce := make([]byte, 12) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to write model", + }) + return false + } + aesGcm, err := cipher.NewGCM(block) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to write model", + }) + return false + } + ciphertext := aesGcm.Seal(nil, nonce, plaintext, nil) + if !skipBackup { + err = s.backupModelToHistory(modelFolder, changeReasonForHistory) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to write model", + }) + return false + } + } + f, err := os.Create(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile))) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to write model", + }) + return false + } + _, _ = f.Write(nonce) + _, _ = f.Write(ciphertext) + _ = f.Close() + return true +} + +func (s *server) lockFolder(folderName string) { + s.globalLock.Lock() + defer s.globalLock.Unlock() + _, exists := s.locksByFolderName[folderName] + if !exists { + s.locksByFolderName[folderName] = &sync.Mutex{} + } + s.locksByFolderName[folderName].Lock() +} + +func (s *server) unlockFolder(folderName string) { + if _, exists := s.locksByFolderName[folderName]; exists { + s.locksByFolderName[folderName].Unlock() + delete(s.locksByFolderName, folderName) + } +} + +func (s *server) backupModelToHistory(modelFolder string, changeReasonForHistory string) (err error) { + historyFolder := filepath.Join(modelFolder, "history") + if _, err := os.Stat(historyFolder); os.IsNotExist(err) { + err = os.Mkdir(historyFolder, 0700) + if err != nil { + return err + } + } + inputModel, err := os.ReadFile(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile))) + if err != nil { + return err + } + historyFile := filepath.Join(historyFolder, 
time.Now().Format("2006-01-02 15:04:05")+" "+changeReasonForHistory+".backup") + err = os.WriteFile(historyFile, inputModel, 0400) + if err != nil { + return err + } + // now delete any old files if over limit to keep + files, err := os.ReadDir(historyFolder) + if err != nil { + return err + } + if len(files) > s.config.BackupHistoryFilesToKeep { + requiredToDelete := len(files) - s.config.BackupHistoryFilesToKeep + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() + }) + for _, file := range files { + requiredToDelete-- + if file.Name() != filepath.Clean(file.Name()) { + return fmt.Errorf("weird file name %v", file.Name()) + } + err = os.Remove(filepath.Join(historyFolder, file.Name())) + if err != nil { + return err + } + if requiredToDelete <= 0 { + break + } + } + } + return +} + +func folderNameForModel(folderNameOfKey string, uuid string) string { + return filepath.Join(folderNameOfKey, uuid) +} + +type argon2Params struct { + memory uint32 + iterations uint32 + parallelism uint8 + saltLength uint32 + keyLength uint32 +} + +func generateKeyFromAlreadyStrongRandomInput(alreadyRandomInput []byte) []byte { + // Establish the parameters to use for Argon2. 
	p := &argon2Params{
		memory:      64 * 1024,
		iterations:  3,
		parallelism: 2,
		saltLength:  16,
		keyLength:   keySize,
	}
	// As the input is already cryptographically secure random, the salt is simply the first n bytes
	salt := alreadyRandomInput[0:p.saltLength]
	hash := argon2.IDKey(alreadyRandomInput[p.saltLength:], salt, p.iterations, p.memory, p.parallelism, p.keyLength)
	return hash
}

// lowerCaseAndTrim lower-cases and trims each tag. NOTE: mutates the passed
// slice in place and returns the same slice.
func lowerCaseAndTrim(tags []string) []string {
	for i := range tags {
		tags[i] = strings.ToLower(strings.TrimSpace(tags[i]))
	}
	return tags
}
diff --git a/pkg/server/report.go b/pkg/server/report.go
new file mode 100644
index 00000000..f8349200
--- /dev/null
+++ b/pkg/server/report.go
@@ -0,0 +1,177 @@
/*
Copyright Š 2023 NAME HERE
*/
package server

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/gin-gonic/gin"
)

// responseType selects which generated artifact streamResponse renders and
// returns to the client.
type responseType int

const (
	dataFlowDiagram responseType = iota
	dataAssetDiagram
	reportPDF
	risksExcel
	tagsExcel
	risksJSON
	technicalAssetsJSON
	statsJSON
)

// The following eight handlers are thin per-artifact wrappers around
// streamResponse, one per route registered in RunServer.

func (s *server) streamDataFlowDiagram(ginContext *gin.Context) {
	s.streamResponse(ginContext, dataFlowDiagram)
}

func (s *server) streamDataAssetDiagram(ginContext *gin.Context) {
	s.streamResponse(ginContext, dataAssetDiagram)
}

func (s *server) streamReportPDF(ginContext *gin.Context) {
	s.streamResponse(ginContext, reportPDF)
}

func (s *server) streamRisksExcel(ginContext *gin.Context) {
	s.streamResponse(ginContext, risksExcel)
}

func (s *server) streamTagsExcel(ginContext *gin.Context) {
	s.streamResponse(ginContext, tagsExcel)
}

func (s *server) streamRisksJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, risksJSON)
}

func (s *server) streamTechnicalAssetsJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, technicalAssetsJSON)
}

func (s *server) streamStatsJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, statsJSON)
}

func (s *server)
streamResponse(ginContext *gin.Context, responseType responseType) { + folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext) + if !ok { + return + } + s.lockFolder(folderNameOfKey) + defer func() { + s.unlockFolder(folderNameOfKey) + var err error + if r := recover(); r != nil { + err = r.(error) + if s.config.Verbose { + log.Println(err) + } + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": strings.TrimSpace(err.Error()), + }) + ok = false + } + }() + dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI))) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + _, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey) + if !ok { + return + } + tmpModelFile, err := os.CreateTemp(s.config.TempFolder, "threagile-render-*") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + defer func() { _ = os.Remove(tmpModelFile.Name()) }() + tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-render-") + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + defer func() { _ = os.RemoveAll(tmpOutputDir) }() + err = os.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400) + if responseType == dataFlowDiagram { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, true, false, false, false, false, false, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.File(filepath.Clean(filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG))) + } else if responseType == dataAssetDiagram { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, true, false, false, false, false, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.File(filepath.Clean(filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG))) + } else if responseType == reportPDF { + 
s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, true, false, false, false, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ReportFilename)), s.config.ReportFilename) + } else if responseType == risksExcel { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, true, false, false, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename)), s.config.ExcelRisksFilename) + } else if responseType == tagsExcel { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, true, false, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename)), s.config.ExcelTagsFilename) + } else if responseType == risksJSON { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, true, false, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonRisksFilename))) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download + } else if responseType == technicalAssetsJSON { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, true, true, false, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename))) + if err != nil { + 
handleErrorInServiceCall(err, ginContext) + return + } + ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download + } else if responseType == statsJSON { + s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, false, false, true, dpi) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonStatsFilename))) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download + } +} diff --git a/pkg/server/server.go b/pkg/server/server.go new file mode 100644 index 00000000..2801a469 --- /dev/null +++ b/pkg/server/server.go @@ -0,0 +1,299 @@ +/* +Copyright Š 2023 NAME HERE +*/ + +package server + +import ( + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + "github.com/threagile/threagile/pkg/common" + "github.com/threagile/threagile/pkg/model" + + "github.com/gin-gonic/gin" + "github.com/threagile/threagile/pkg/docs" + "github.com/threagile/threagile/pkg/security/risks" + "github.com/threagile/threagile/pkg/security/types" +) + +type server struct { + config *common.Config + successCount int + errorCount int + globalLock sync.Mutex + throttlerLock sync.Mutex + createdObjectsThrottler map[string][]int64 + mapTokenHashToTimeoutStruct map[string]timeoutStruct + mapFolderNameToTokenHash map[string]string + extremeShortTimeoutsForTesting bool + locksByFolderName map[string]*sync.Mutex + customRiskRules map[string]*model.CustomRisk +} + +func RunServer(config *common.Config) { + s := &server{ + config: config, + createdObjectsThrottler: make(map[string][]int64), + mapTokenHashToTimeoutStruct: make(map[string]timeoutStruct), + mapFolderNameToTokenHash: 
make(map[string]string),
		extremeShortTimeoutsForTesting: false,
		locksByFolderName:              make(map[string]*sync.Mutex),
	}
	router := gin.Default()
	// static UI assets are served from <ServerFolder>/s/static
	router.LoadHTMLGlob(filepath.Join(s.config.ServerFolder, "s", "static", "*.html")) // <==
	router.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", gin.H{})
	})
	router.HEAD("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", gin.H{})
	})
	router.StaticFile("/threagile.png", filepath.Join(s.config.ServerFolder, "s", "static", "threagile.png")) // <==
	router.StaticFile("/site.webmanifest", filepath.Join(s.config.ServerFolder, "s", "static", "site.webmanifest"))
	router.StaticFile("/favicon.ico", filepath.Join(s.config.ServerFolder, "s", "static", "favicon.ico"))
	router.StaticFile("/favicon-32x32.png", filepath.Join(s.config.ServerFolder, "s", "static", "favicon-32x32.png"))
	router.StaticFile("/favicon-16x16.png", filepath.Join(s.config.ServerFolder, "s", "static", "favicon-16x16.png"))
	router.StaticFile("/apple-touch-icon.png", filepath.Join(s.config.ServerFolder, "s", "static", "apple-touch-icon.png"))
	router.StaticFile("/android-chrome-512x512.png", filepath.Join(s.config.ServerFolder, "s", "static", "android-chrome-512x512.png"))
	router.StaticFile("/android-chrome-192x192.png", filepath.Join(s.config.ServerFolder, "s", "static", "android-chrome-192x192.png"))

	// schema / templates / API description are served from the app folder
	router.StaticFile("/schema.json", filepath.Join(s.config.AppFolder, "schema.json"))
	router.StaticFile("/live-templates.txt", filepath.Join(s.config.AppFolder, "live-templates.txt"))
	router.StaticFile("/openapi.yaml", filepath.Join(s.config.AppFolder, "openapi.yaml"))
	router.StaticFile("/swagger-ui/", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/index.html"))
	router.StaticFile("/swagger-ui/index.html", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/index.html"))
	router.StaticFile("/swagger-ui/oauth2-redirect.html", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/oauth2-redirect.html"))
	router.StaticFile("/swagger-ui/swagger-ui.css", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui.css"))
	router.StaticFile("/swagger-ui/swagger-ui.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui.js"))
	router.StaticFile("/swagger-ui/swagger-ui-bundle.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui-bundle.js"))
	router.StaticFile("/swagger-ui/swagger-ui-standalone-preset.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui-standalone-preset.js")) // <==

	router.GET("/threagile-example-model.yaml", s.exampleFile)
	router.GET("/threagile-stub-model.yaml", s.stubFile)

	// meta endpoints: liveness, version, enum value listings
	router.GET("/meta/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	router.GET("/meta/version", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"version":         docs.ThreagileVersion,
			"build_timestamp": s.config.BuildTimestamp,
		})
	})
	router.GET("/meta/types", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"quantity":                     arrayOfStringValues(types.QuantityValues()),
			"confidentiality":              arrayOfStringValues(types.ConfidentialityValues()),
			"criticality":                  arrayOfStringValues(types.CriticalityValues()),
			"technical_asset_type":         arrayOfStringValues(types.TechnicalAssetTypeValues()),
			"technical_asset_size":         arrayOfStringValues(types.TechnicalAssetSizeValues()),
			"authorization":                arrayOfStringValues(types.AuthorizationValues()),
			"authentication":               arrayOfStringValues(types.AuthenticationValues()),
			"usage":                        arrayOfStringValues(types.UsageValues()),
			"encryption":                   arrayOfStringValues(types.EncryptionStyleValues()),
			"data_format":                  arrayOfStringValues(types.DataFormatValues()),
			"protocol":                     arrayOfStringValues(types.ProtocolValues()),
			"technical_asset_technology":   arrayOfStringValues(types.TechnicalAssetTechnologyValues()),
			"technical_asset_machine":      arrayOfStringValues(types.TechnicalAssetMachineValues()),
			"trust_boundary_type":          arrayOfStringValues(types.TrustBoundaryTypeValues()),
			"data_breach_probability":      arrayOfStringValues(types.DataBreachProbabilityValues()),
			"risk_severity":                arrayOfStringValues(types.RiskSeverityValues()),
			"risk_exploitation_likelihood": arrayOfStringValues(types.RiskExploitationLikelihoodValues()),
			"risk_exploitation_impact":     arrayOfStringValues(types.RiskExploitationImpactValues()),
			"risk_function":                arrayOfStringValues(types.RiskFunctionValues()),
			"risk_status":                  arrayOfStringValues(types.RiskStatusValues()),
			"stride":                       arrayOfStringValues(types.STRIDEValues()),
		})
	})

	// TODO router.GET("/meta/risk-rules", listRiskRules)
	// TODO router.GET("/meta/model-macros", listModelMacros)

	router.GET("/meta/stats", s.stats)

	// stateless direct analysis endpoints
	router.POST("/direct/analyze", s.analyze)
	router.POST("/direct/check", s.check)
	router.GET("/direct/stub", s.stubFile)

	// key/token authentication lifecycle
	router.POST("/auth/keys", s.createKey)
	router.DELETE("/auth/keys", s.deleteKey)
	router.POST("/auth/tokens", s.createToken)
	router.DELETE("/auth/tokens", s.deleteToken)

	// persistent model CRUD plus per-model artifact rendering
	router.POST("/models", s.createNewModel)
	router.GET("/models", s.listModels)
	router.DELETE("/models/:model-id", s.deleteModel)
	router.GET("/models/:model-id", s.getModel)
	router.PUT("/models/:model-id", s.importModel)
	router.GET("/models/:model-id/data-flow-diagram", s.streamDataFlowDiagram)
	router.GET("/models/:model-id/data-asset-diagram", s.streamDataAssetDiagram)
	router.GET("/models/:model-id/report-pdf", s.streamReportPDF)
	router.GET("/models/:model-id/risks-excel", s.streamRisksExcel)
	router.GET("/models/:model-id/tags-excel", s.streamTagsExcel)
	router.GET("/models/:model-id/risks", s.streamRisksJSON)
	router.GET("/models/:model-id/technical-assets", s.streamTechnicalAssetsJSON)
	router.GET("/models/:model-id/stats", s.streamStatsJSON)
	router.GET("/models/:model-id/analysis", s.analyzeModelOnServerDirectly)

	router.GET("/models/:model-id/cover", s.getCover)
	router.PUT("/models/:model-id/cover", s.setCover)
	router.GET("/models/:model-id/overview", s.getOverview)
	router.PUT("/models/:model-id/overview", s.setOverview)
	//router.GET("/models/:model-id/questions", getQuestions)
	//router.PUT("/models/:model-id/questions", setQuestions)
	router.GET("/models/:model-id/abuse-cases", s.getAbuseCases)
	router.PUT("/models/:model-id/abuse-cases", s.setAbuseCases)
	router.GET("/models/:model-id/security-requirements", s.getSecurityRequirements)
	router.PUT("/models/:model-id/security-requirements", s.setSecurityRequirements)
	//router.GET("/models/:model-id/tags", getTags)
	//router.PUT("/models/:model-id/tags", setTags)

	router.GET("/models/:model-id/data-assets", s.getDataAssets)
	router.POST("/models/:model-id/data-assets", s.createNewDataAsset)
	router.GET("/models/:model-id/data-assets/:data-asset-id", s.getDataAsset)
	router.PUT("/models/:model-id/data-assets/:data-asset-id", s.setDataAsset)
	router.DELETE("/models/:model-id/data-assets/:data-asset-id", s.deleteDataAsset)

	router.GET("/models/:model-id/trust-boundaries", s.getTrustBoundaries)
	// router.POST("/models/:model-id/trust-boundaries", createNewTrustBoundary)
	// router.GET("/models/:model-id/trust-boundaries/:trust-boundary-id", getTrustBoundary)
	// router.PUT("/models/:model-id/trust-boundaries/:trust-boundary-id", setTrustBoundary)
	// router.DELETE("/models/:model-id/trust-boundaries/:trust-boundary-id", deleteTrustBoundary)

	router.GET("/models/:model-id/shared-runtimes", s.getSharedRuntimes)
	router.POST("/models/:model-id/shared-runtimes", s.createNewSharedRuntime)
	router.GET("/models/:model-id/shared-runtimes/:shared-runtime-id", s.getSharedRuntime)
	router.PUT("/models/:model-id/shared-runtimes/:shared-runtime-id", s.setSharedRuntime)
	router.DELETE("/models/:model-id/shared-runtimes/:shared-runtime-id", s.deleteSharedRuntime)

	reporter :=
common.DefaultProgressReporter{Verbose: s.config.Verbose} + s.customRiskRules = model.LoadCustomRiskRules(s.config.RiskRulesPlugins, reporter) + + fmt.Println("Threagile server running...") + _ = router.Run(":" + strconv.Itoa(s.config.ServerPort)) // listen and serve on 0.0.0.0:8080 or whatever port was specified +} + +func (s *server) exampleFile(ginContext *gin.Context) { + example, err := os.ReadFile(filepath.Join(s.config.AppFolder, "threagile-example-model.yaml")) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.Data(http.StatusOK, gin.MIMEYAML, example) +} + +func (s *server) stubFile(ginContext *gin.Context) { + stub, err := os.ReadFile(filepath.Join(s.config.AppFolder, "threagile-stub-model.yaml")) + if err != nil { + handleErrorInServiceCall(err, ginContext) + return + } + ginContext.Data(http.StatusOK, gin.MIMEYAML, s.addSupportedTags(stub)) // TODO use also the MIMEYAML way of serving YAML in model export? +} + +func (s *server) addSupportedTags(input []byte) []byte { + // add distinct tags as "tags_available" + supportedTags := make(map[string]bool) + for _, customRule := range s.customRiskRules { + for _, tag := range customRule.Tags { + supportedTags[strings.ToLower(tag)] = true + } + } + + for _, rule := range risks.GetBuiltInRiskRules() { + for _, tag := range rule.SupportedTags() { + supportedTags[strings.ToLower(tag)] = true + } + } + + tags := make([]string, 0, len(supportedTags)) + for t := range supportedTags { + tags = append(tags, t) + } + if len(tags) == 0 { + return input + } + sort.Strings(tags) + if s.config.Verbose { + fmt.Print("Supported tags of all risk rules: ") + for i, tag := range tags { + if i > 0 { + fmt.Print(", ") + } + fmt.Print(tag) + } + fmt.Println() + } + replacement := "tags_available:" + for _, tag := range tags { + replacement += "\n - " + tag + } + return []byte(strings.Replace(string(input), "tags_available:", replacement, 1)) +} + +func arrayOfStringValues(values []types.TypeEnum)
[]string { + result := make([]string, 0) + for _, value := range values { + result = append(result, value.String()) + } + return result +} + +func (s *server) stats(ginContext *gin.Context) { + keyCount, modelCount := 0, 0 + keyFolders, err := os.ReadDir(filepath.Join(s.config.ServerFolder, s.config.KeyFolder)) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to collect stats", + }) + return + } + for _, keyFolder := range keyFolders { + if len(keyFolder.Name()) == 128 { // it's a sha512 token hash probably, so count it as token folder for the stats + keyCount++ + if keyFolder.Name() != filepath.Clean(keyFolder.Name()) { + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "weird file path", + }) + return + } + modelFolders, err := os.ReadDir(filepath.Join(s.config.ServerFolder, s.config.KeyFolder, keyFolder.Name())) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to collect stats", + }) + return + } + for _, modelFolder := range modelFolders { + if len(modelFolder.Name()) == 36 { // it's a uuid model folder probably, so count it as model folder for the stats + modelCount++ + } + } + } + } + // TODO collect and deliver more stats (old model count?) 
and health info + ginContext.JSON(http.StatusOK, gin.H{ + "key_count": keyCount, + "model_count": modelCount, + "success_count": s.successCount, + "error_count": s.errorCount, + }) +} + +func handleErrorInServiceCall(err error, ginContext *gin.Context) { + log.Println(err) + ginContext.JSON(http.StatusBadRequest, gin.H{ + "error": strings.TrimSpace(err.Error()), + }) +} diff --git a/pkg/server/token.go b/pkg/server/token.go new file mode 100644 index 00000000..d9acb2b6 --- /dev/null +++ b/pkg/server/token.go @@ -0,0 +1,297 @@ +/* +Copyright Š 2023 NAME HERE +*/ +package server + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +const keySize = 32 + +type keyHeader struct { + Key string `header:"key"` +} + +type timeoutStruct struct { + xorRand []byte + createdNanoTime, lastAccessedNanoTime int64 +} + +func (s *server) createKey(ginContext *gin.Context) { + ok := s.checkObjectCreationThrottler(ginContext, "KEY") + if !ok { + return + } + s.globalLock.Lock() + defer s.globalLock.Unlock() + + keyBytesArr := make([]byte, keySize) + n, err := rand.Read(keyBytesArr[:]) + if n != keySize || err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to create key", + }) + return + } + err = os.MkdirAll(s.folderNameFromKey(keyBytesArr), 0700) + if err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to create key", + }) + return + } + ginContext.JSON(http.StatusCreated, gin.H{ + "key": base64.RawURLEncoding.EncodeToString(keyBytesArr[:]), + }) +} + +func (s *server) checkObjectCreationThrottler(ginContext *gin.Context, typeName string) bool { + s.throttlerLock.Lock() + defer s.throttlerLock.Unlock() + + // remove all elements older than 3 minutes (= 180000000000 ns) + now := time.Now().UnixNano() + cutoff := now - 180000000000 + for keyCheck := range 
s.createdObjectsThrottler { + for i := 0; i < len(s.createdObjectsThrottler[keyCheck]); i++ { + if s.createdObjectsThrottler[keyCheck][i] < cutoff { + // Remove the element at index i from slice (safe while looping using i as iterator) + s.createdObjectsThrottler[keyCheck] = append(s.createdObjectsThrottler[keyCheck][:i], s.createdObjectsThrottler[keyCheck][i+1:]...) + i-- // Since we just deleted a[i], we must redo that index + } + } + length := len(s.createdObjectsThrottler[keyCheck]) + if length == 0 { + delete(s.createdObjectsThrottler, keyCheck) + } + /* + if *verbose { + log.Println("Throttling count: "+strconv.Itoa(length)) + } + */ + } + + // check current request + keyHash := hash(typeName) // getting the real client ip is not easy inside fully encapsulated containerized runtime + if _, ok := s.createdObjectsThrottler[keyHash]; !ok { + s.createdObjectsThrottler[keyHash] = make([]int64, 0) + } + // check the limit of 20 creations for this type per 3 minutes + withinLimit := len(s.createdObjectsThrottler[keyHash]) < 20 + if withinLimit { + s.createdObjectsThrottler[keyHash] = append(s.createdObjectsThrottler[keyHash], now) + return true + } + ginContext.JSON(http.StatusTooManyRequests, gin.H{ + "error": "object creation throttling exceeded (denial-of-service protection): please wait some time and try again", + }) + return false +} + +func (s *server) deleteKey(ginContext *gin.Context) { + folderName, _, ok := s.checkKeyToFolderName(ginContext) + if !ok { + return + } + s.globalLock.Lock() + defer s.globalLock.Unlock() + err := os.RemoveAll(folderName) + if err != nil { + log.Println("error during key delete: " + err.Error()) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "key not found", + }) + return + } + ginContext.JSON(http.StatusOK, gin.H{ + "message": "key deleted", + }) +} + +func (s *server) createToken(ginContext *gin.Context) { + folderName, key, ok := s.checkKeyToFolderName(ginContext) + if !ok { + return + } + s.globalLock.Lock() + 
defer s.globalLock.Unlock() + if tokenHash, exists := s.mapFolderNameToTokenHash[folderName]; exists { + // invalidate previous token + delete(s.mapTokenHashToTimeoutStruct, tokenHash) + } + // create a strong random 256 bit value (used to xor) + xorBytesArr := make([]byte, keySize) + n, err := rand.Read(xorBytesArr[:]) + if n != keySize || err != nil { + log.Println(err) + ginContext.JSON(http.StatusInternalServerError, gin.H{ + "error": "unable to create token", + }) + return + } + now := time.Now().UnixNano() + token := xor(key, xorBytesArr) + tokenHash := hashSHA256(token) + s.housekeepingTokenMaps() + s.mapTokenHashToTimeoutStruct[tokenHash] = timeoutStruct{ + xorRand: xorBytesArr, + createdNanoTime: now, + lastAccessedNanoTime: now, + } + s.mapFolderNameToTokenHash[folderName] = tokenHash + ginContext.JSON(http.StatusCreated, gin.H{ + "token": base64.RawURLEncoding.EncodeToString(token[:]), + }) +} + +type tokenHeader struct { + Token string `header:"token"` +} + +func (s *server) deleteToken(ginContext *gin.Context) { + header := tokenHeader{} + if err := ginContext.ShouldBindHeader(&header); err != nil { + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return + } + token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token)) + if len(token) == 0 || err != nil { + if err != nil { + log.Println(err) + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return + } + s.globalLock.Lock() + defer s.globalLock.Unlock() + s.deleteTokenHashFromMaps(hashSHA256(token)) + ginContext.JSON(http.StatusOK, gin.H{ + "message": "token deleted", + }) +} + +func (s *server) checkKeyToFolderName(ginContext *gin.Context) (folderNameOfKey string, key []byte, ok bool) { + header := keyHeader{} + if err := ginContext.ShouldBindHeader(&header); err != nil { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "key not found", + }) + return folderNameOfKey, key, false + } + 
key, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Key)) + if len(key) == 0 || err != nil { + if err != nil { + log.Println(err) + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "key not found", + }) + return folderNameOfKey, key, false + } + folderNameOfKey = s.folderNameFromKey(key) + if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "key not found", + }) + return folderNameOfKey, key, false + } + return folderNameOfKey, key, true +} + +func (s *server) checkTokenToFolderName(ginContext *gin.Context) (folderNameOfKey string, key []byte, ok bool) { + header := tokenHeader{} + if err := ginContext.ShouldBindHeader(&header); err != nil { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return folderNameOfKey, key, false + } + token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token)) + if len(token) == 0 || err != nil { + if err != nil { + log.Println(err) + } + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return folderNameOfKey, key, false + } + s.globalLock.Lock() + defer s.globalLock.Unlock() + s.housekeepingTokenMaps() // to remove timed-out ones + tokenHash := hashSHA256(token) + if timeoutStruct, exists := s.mapTokenHashToTimeoutStruct[tokenHash]; exists { + // re-create the key from token + key := xor(token, timeoutStruct.xorRand) + folderNameOfKey := s.folderNameFromKey(key) + if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) { + log.Println(err) + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return folderNameOfKey, key, false + } + timeoutStruct.lastAccessedNanoTime = time.Now().UnixNano() + s.mapTokenHashToTimeoutStruct[tokenHash] = timeoutStruct // write back: timeoutStruct is a copy of the map value, so the refreshed lastAccessedNanoTime would otherwise be lost + return folderNameOfKey, key, true + } else { + ginContext.JSON(http.StatusNotFound, gin.H{ + "error": "token not found", + }) + return folderNameOfKey, key, false + } +} + +func (s *server)
folderNameFromKey(key []byte) string { + sha512Hash := hashSHA256(key) + return filepath.Join(s.config.ServerFolder, s.config.KeyFolder, sha512Hash) +} + +func (s *server) housekeepingTokenMaps() { + now := time.Now().UnixNano() + for tokenHash, val := range s.mapTokenHashToTimeoutStruct { + if s.extremeShortTimeoutsForTesting { + // remove all elements older than 1 minute (= 60000000000 ns) soft + // and all elements older than 3 minutes (= 180000000000 ns) hard + if now-val.lastAccessedNanoTime > 60000000000 || now-val.createdNanoTime > 180000000000 { + fmt.Println("About to remove a token hash from maps") + s.deleteTokenHashFromMaps(tokenHash) + } + } else { + // remove all elements older than 30 minutes (= 1800000000000 ns) soft + // and all elements older than 10 hours (= 36000000000000 ns) hard + if now-val.lastAccessedNanoTime > 1800000000000 || now-val.createdNanoTime > 36000000000000 { + s.deleteTokenHashFromMaps(tokenHash) + } + } + } +} + +func (s *server) deleteTokenHashFromMaps(tokenHash string) { + delete(s.mapTokenHashToTimeoutStruct, tokenHash) + for folderName, check := range s.mapFolderNameToTokenHash { + if check == tokenHash { + delete(s.mapFolderNameToTokenHash, folderName) + break + } + } +} diff --git a/pkg/server/zip.go b/pkg/server/zip.go new file mode 100644 index 00000000..32401189 --- /dev/null +++ b/pkg/server/zip.go @@ -0,0 +1,125 @@ +/* +Copyright © 2023 NAME HERE +*/ +package server + +import ( + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +// ZipFiles compresses one or many files into a single zip archive file. +// Param 1: filename is the output zip file's name. +// Param 2: files is a list of files to add to the zip.
+func zipFiles(filename string, files []string) error { + newZipFile, err := os.Create(filepath.Clean(filename)) + if err != nil { + return err + } + defer func() { _ = newZipFile.Close() }() + + zipWriter := zip.NewWriter(newZipFile) + defer func() { _ = zipWriter.Close() }() + + // Add files to zip + for _, file := range files { + if err = addFileToZip(zipWriter, file); err != nil { + return err + } + } + return nil +} + +// Unzip will decompress a zip archive, moving all files and folders +// within the zip file (parameter 1) to an output directory (parameter 2). +func unzip(src string, dest string) ([]string, error) { + var filenames []string + + r, err := zip.OpenReader(src) + if err != nil { + return filenames, err + } + defer func() { _ = r.Close() }() + + for _, f := range r.File { + // Store filename/path for returning and using later on + path := filepath.Clean(filepath.Join(dest, filepath.Clean(f.Name))) + // Check for ZipSlip. More Info: http://bit.ly/2MsjAWE + if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) { + return filenames, fmt.Errorf("%s: illegal file path", path) + } + filenames = append(filenames, path) + if f.FileInfo().IsDir() { + // Make Folder + _ = os.MkdirAll(path, os.ModePerm) + continue + } + // Make File + if err = os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil { + return filenames, err + } + if path != filepath.Clean(path) { + return filenames, fmt.Errorf("weird file path %v", path) + } + outFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return filenames, err + } + + if f.FileInfo().Size() == 0 { + _ = outFile.Close() + continue + } + + rc, err := f.Open() + if err != nil { + return filenames, err + } + _, err = io.CopyN(outFile, rc, f.FileInfo().Size()) + // Close the file without defer to close before next iteration of loop + _ = outFile.Close() + _ = rc.Close() + if err != nil { + return filenames, err + } + } + return filenames, nil +} + +func 
addFileToZip(zipWriter *zip.Writer, filename string) error { + fileToZip, err := os.Open(filepath.Clean(filename)) + if err != nil { + return err + } + defer func() { _ = fileToZip.Close() }() + + // Get the file information + info, err := fileToZip.Stat() + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + + // Using FileInfoHeader() above only uses the basename of the file. If we want + // to preserve the folder structure we can overwrite this with the full path. + //header.Name = filename + + // Change to deflate to gain better compression + // see http://golang.org/pkg/archive/zip/#pkg-constants + header.Method = zip.Deflate + + writer, err := zipWriter.CreateHeader(header) + if err != nil { + return err + } + _, err = io.Copy(writer, fileToZip) + return err +} diff --git a/raa/dummy/dummy.go b/raa/dummy/dummy.go deleted file mode 100644 index febfaf9a..00000000 --- a/raa/dummy/dummy.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "fmt" - "github.com/threagile/threagile/model" - "math/rand" -) - -// JUST A DUMMY TO HAVE AN ALTERNATIVE PLUGIN TO USE/TEST - -// used from plugin caller: -func CalculateRAA() string { - for techAssetID, techAsset := range model.ParsedModelRoot.TechnicalAssets { - techAsset.RAA = float64(rand.Intn(100)) - fmt.Println("Using dummy RAA random calculation (just to test the usage of other shared object files as plugins)") - model.ParsedModelRoot.TechnicalAssets[techAssetID] = techAsset - } - // return intro text (for reporting etc., can be short summary-like) - return "Just some dummy algorithm implementation for demo purposes of pluggability..." 
-} diff --git a/report/json.go b/report/json.go deleted file mode 100644 index bd4d8009..00000000 --- a/report/json.go +++ /dev/null @@ -1,50 +0,0 @@ -package report - -import ( - "encoding/json" - "github.com/threagile/threagile/model" - "io/ioutil" -) - -func WriteRisksJSON(filename string) { - /* - remainingRisks := make([]model.Risk, 0) - for _, category := range model.SortedRiskCategories() { - risks := model.SortedRisksOfCategory(category) - for _, risk := range model.ReduceToOnlyStillAtRisk(risks) { - remainingRisks = append(remainingRisks, risk) - } - } - */ - jsonBytes, err := json.Marshal(model.AllRisks()) - if err != nil { - panic(err) - } - err = ioutil.WriteFile(filename, jsonBytes, 0644) - if err != nil { - panic(err) - } -} - -// TODO: also a "data assets" json? -func WriteTechnicalAssetsJSON(filename string) { - jsonBytes, err := json.Marshal(model.ParsedModelRoot.TechnicalAssets) - if err != nil { - panic(err) - } - err = ioutil.WriteFile(filename, jsonBytes, 0644) - if err != nil { - panic(err) - } -} - -func WriteStatsJSON(filename string) { - jsonBytes, err := json.Marshal(model.OverallRiskStatistics()) - if err != nil { - panic(err) - } - err = ioutil.WriteFile(filename, jsonBytes, 0644) - if err != nil { - panic(err) - } -} diff --git a/report/report.go b/report/report.go deleted file mode 100644 index 39d3bdff..00000000 --- a/report/report.go +++ /dev/null @@ -1,5878 +0,0 @@ -package report - -import ( - "errors" - "fmt" - "github.com/jung-kurt/gofpdf" - "github.com/jung-kurt/gofpdf/contrib/gofpdi" - "github.com/threagile/threagile/colors" - "github.com/threagile/threagile/model" - "github.com/threagile/threagile/risks/built-in/accidental-secret-leak" - "github.com/threagile/threagile/risks/built-in/code-backdooring" - "github.com/threagile/threagile/risks/built-in/container-baseimage-backdooring" - "github.com/threagile/threagile/risks/built-in/container-platform-escape" - 
"github.com/threagile/threagile/risks/built-in/cross-site-request-forgery" - "github.com/threagile/threagile/risks/built-in/cross-site-scripting" - "github.com/threagile/threagile/risks/built-in/dos-risky-access-across-trust-boundary" - "github.com/threagile/threagile/risks/built-in/incomplete-model" - "github.com/threagile/threagile/risks/built-in/ldap-injection" - "github.com/threagile/threagile/risks/built-in/missing-authentication" - "github.com/threagile/threagile/risks/built-in/missing-authentication-second-factor" - "github.com/threagile/threagile/risks/built-in/missing-build-infrastructure" - "github.com/threagile/threagile/risks/built-in/missing-cloud-hardening" - "github.com/threagile/threagile/risks/built-in/missing-file-validation" - "github.com/threagile/threagile/risks/built-in/missing-hardening" - "github.com/threagile/threagile/risks/built-in/missing-identity-propagation" - "github.com/threagile/threagile/risks/built-in/missing-identity-provider-isolation" - "github.com/threagile/threagile/risks/built-in/missing-identity-store" - "github.com/threagile/threagile/risks/built-in/missing-network-segmentation" - "github.com/threagile/threagile/risks/built-in/missing-vault" - "github.com/threagile/threagile/risks/built-in/missing-vault-isolation" - "github.com/threagile/threagile/risks/built-in/missing-waf" - "github.com/threagile/threagile/risks/built-in/mixed-targets-on-shared-runtime" - "github.com/threagile/threagile/risks/built-in/path-traversal" - "github.com/threagile/threagile/risks/built-in/push-instead-of-pull-deployment" - "github.com/threagile/threagile/risks/built-in/search-query-injection" - "github.com/threagile/threagile/risks/built-in/server-side-request-forgery" - "github.com/threagile/threagile/risks/built-in/service-registry-poisoning" - "github.com/threagile/threagile/risks/built-in/sql-nosql-injection" - "github.com/threagile/threagile/risks/built-in/unchecked-deployment" - 
"github.com/threagile/threagile/risks/built-in/unencrypted-asset" - "github.com/threagile/threagile/risks/built-in/unencrypted-communication" - "github.com/threagile/threagile/risks/built-in/unguarded-access-from-internet" - "github.com/threagile/threagile/risks/built-in/unguarded-direct-datastore-access" - "github.com/threagile/threagile/risks/built-in/unnecessary-communication-link" - "github.com/threagile/threagile/risks/built-in/unnecessary-data-asset" - "github.com/threagile/threagile/risks/built-in/unnecessary-data-transfer" - "github.com/threagile/threagile/risks/built-in/unnecessary-technical-asset" - "github.com/threagile/threagile/risks/built-in/untrusted-deserialization" - "github.com/threagile/threagile/risks/built-in/wrong-communication-link-content" - "github.com/threagile/threagile/risks/built-in/wrong-trust-boundary-content" - "github.com/threagile/threagile/risks/built-in/xml-external-entity" - "github.com/wcharczuk/go-chart" - "github.com/wcharczuk/go-chart/drawing" - "image" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const fontSizeHeadline, fontSizeHeadlineSmall, fontSizeBody, fontSizeSmall, fontSizeVerySmall = 20, 16, 12, 9, 7 -const /*dataFlowDiagramFullscreen,*/ allowedPdfLandscapePages, embedDiagramLegendPage = /*false,*/ true, false - -var isLandscapePage bool - -var pdf *gofpdf.Fpdf -var alreadyTemplateImported = false -var coverTemplateId, contentTemplateId, diagramLegendTemplateId int -var pageNo int -var linkCounter int -var tocLinkIdByAssetId map[string]int -var homeLink int -var currentChapterTitleBreadcrumb string - -var firstParagraphRegEx = regexp.MustCompile(`(.*?)((
)|(

))`) - -func initReport() { - pdf = nil - isLandscapePage = false - pageNo = 0 - linkCounter = 0 - homeLink = 0 - currentChapterTitleBreadcrumb = "" - tocLinkIdByAssetId = make(map[string]int) -} - -func WriteReportPDF(reportFilename string, - templateFilename string, - dataFlowDiagramFilenamePNG string, - dataAssetDiagramFilenamePNG string, - modelFilename string, - skipRiskRules string, - buildTimestamp string, - modelHash string, - introTextRAA string, customRiskRules map[string]model.CustomRiskRule) { - initReport() - createPdfAndInitMetadata() - parseBackgroundTemplate(templateFilename) - createCover() - createTableOfContents() - createManagementSummary() - createImpactInitialRisks() - createRiskMitigationStatus() - createImpactRemainingRisks() - createTargetDescription(filepath.Dir(modelFilename)) - embedDataFlowDiagram(dataFlowDiagramFilenamePNG) - createSecurityRequirements() - createAbuseCases() - createTagListing() - createSTRIDE() - createAssignmentByFunction() - createRAA(introTextRAA) - embedDataRiskMapping(dataAssetDiagramFilenamePNG) - //createDataRiskQuickWins() - createOutOfScopeAssets() - createModelFailures() - createQuestions() - createRiskCategories() - createTechnicalAssets() - createDataAssets() - createTrustBoundaries() - createSharedRuntimes() - createRiskRulesChecked(modelFilename, skipRiskRules, buildTimestamp, modelHash, customRiskRules) - createDisclaimer() - writeReportToFile(reportFilename) -} - -func checkErr(err error) { - if err != nil { - panic(err) - } -} - -func createPdfAndInitMetadata() { - pdf = gofpdf.New("P", "mm", "A4", "") - pdf.SetCreator(model.ParsedModelRoot.Author.Homepage, true) - pdf.SetAuthor(model.ParsedModelRoot.Author.Name, true) - pdf.SetTitle("Threat Model Report: "+model.ParsedModelRoot.Title, true) - pdf.SetSubject("Threat Model Report: "+model.ParsedModelRoot.Title, true) - // pdf.SetPageBox("crop", 0, 0, 100, 010) - pdf.SetHeaderFunc(headerFunc) - pdf.SetFooterFunc(footerFunc) - linkCounter = 1 // link 
counting starts at 1 via pdf.AddLink -} - -func headerFunc() { - if !isLandscapePage { - gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300) - pdf.SetTopMargin(35) - } -} - -func footerFunc() { - addBreadcrumb() - pdf.SetFont("Helvetica", "", 10) - pdf.SetTextColor(127, 127, 127) - pdf.Text(8.6, 284, "Threat Model Report via Threagile") //: "+model.ParsedModelRoot.Title) - pdf.Link(8.4, 281, 54.6, 4, homeLink) - pageNo++ - text := "Page " + strconv.Itoa(pageNo) - if pageNo < 10 { - text = " " + text - } else if pageNo < 100 { - text = " " + text - } - if pageNo > 1 { - pdf.Text(186, 284, text) - } -} - -func addBreadcrumb() { - if len(currentChapterTitleBreadcrumb) > 0 { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetFont("Helvetica", "", 10) - pdf.SetTextColor(127, 127, 127) - pdf.Text(46.7, 24.5, uni(currentChapterTitleBreadcrumb+" - "+model.ParsedModelRoot.Title)) - } -} - -func parseBackgroundTemplate(templateFilename string) { - /* - imageBox, err := rice.FindBox("template") - checkErr(err) - file, err := ioutil.TempFile("", "background-*-.pdf") - checkErr(err) - defer os.Remove(file.Name()) - backgroundBytes := imageBox.MustBytes("background.pdf") - err = ioutil.WriteFile(file.Name(), backgroundBytes, 0644) - checkErr(err) - */ - coverTemplateId = gofpdi.ImportPage(pdf, templateFilename, 1, "/MediaBox") - contentTemplateId = gofpdi.ImportPage(pdf, templateFilename, 2, "/MediaBox") - diagramLegendTemplateId = gofpdi.ImportPage(pdf, templateFilename, 3, "/MediaBox") -} - -func createCover() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.AddPage() - gofpdi.UseImportedTemplate(pdf, coverTemplateId, 0, 0, 0, 300) - pdf.SetFont("Helvetica", "B", 28) - pdf.SetTextColor(0, 0, 0) - pdf.Text(40, 110, "Threat Model Report") - pdf.Text(40, 125, uni(model.ParsedModelRoot.Title)) - pdf.SetFont("Helvetica", "", 12) - reportDate := model.ParsedModelRoot.Date - if reportDate.IsZero() { - reportDate = time.Now() - } - pdf.Text(40.7, 145, 
reportDate.Format("2 January 2006")) - pdf.Text(40.7, 153, uni(model.ParsedModelRoot.Author.Name)) - pdf.SetFont("Helvetica", "", 10) - pdf.SetTextColor(80, 80, 80) - pdf.Text(8.6, 275, model.ParsedModelRoot.Author.Homepage) - pdf.SetFont("Helvetica", "", 12) - pdf.SetTextColor(0, 0, 0) -} - -func createTableOfContents() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.AddPage() - currentChapterTitleBreadcrumb = "Table of Contents" - homeLink = pdf.AddLink() - defineLinkTarget("{home}") - gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300) - pdf.SetFont("Helvetica", "B", fontSizeHeadline) - pdf.Text(11, 40, "Table of Contents") - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetY(46) - - pdf.SetLineWidth(0.25) - pdf.SetDrawColor(160, 160, 160) - pdf.SetDashPattern([]float64{0.5, 0.5}, 0) - - // =============== - - var y float64 = 50 - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Text(11, y, "Results Overview") - pdf.SetFont("Helvetica", "", fontSizeBody) - - y += 6 - pdf.Text(11, y, " "+"Management Summary") - pdf.Text(175, y, "{management-summary}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - risks := "Risks" - catStr := "Categories" - count, catCount := model.TotalRiskCount(), len(model.GeneratedRisksByCategory) - if count == 1 { - risks = "Risk" - } - if catCount == 1 { - catStr = "Category" - } - y += 6 - pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Initial "+risks+" in "+strconv.Itoa(catCount)+" "+catStr) - pdf.Text(175, y, "{impact-analysis-initial-risks}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Risk Mitigation") - pdf.Text(175, y, "{risk-mitigation-status}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - risks = "Risks" - catStr = "Categories" - count, catCount = len(model.FilteredByStillAtRisk()), 
len(model.CategoriesOfOnlyRisksStillAtRisk(model.GeneratedRisksByCategory)) - if count == 1 { - risks = "Risk" - } - if catCount == 1 { - catStr = "Category" - } - pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Remaining "+risks+" in "+strconv.Itoa(catCount)+" "+catStr) - pdf.Text(175, y, "{impact-analysis-remaining-risks}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Application Overview") - pdf.Text(175, y, "{target-overview}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Data-Flow Diagram") - pdf.Text(175, y, "{data-flow-diagram}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Security Requirements") - pdf.Text(175, y, "{security-requirements}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Abuse Cases") - pdf.Text(175, y, "{abuse-cases}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Tag Listing") - pdf.Text(175, y, "{tag-listing}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"STRIDE Classification of Identified Risks") - pdf.Text(175, y, "{stride}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Assignment by Function") - pdf.Text(175, y, "{function-assignment}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"RAA Analysis") - pdf.Text(175, y, "{raa-analysis}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - pdf.Text(11, y, " "+"Data Mapping") - pdf.Text(175, y, "{data-risk-mapping}") - 
pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - /* - y += 6 - assets := "assets" - count = len(model.SortedTechnicalAssetsByQuickWinsAndTitle()) - if count == 1 { - assets = "asset" - } - pdf.Text(11, y, " "+"Data Risk Quick Wins: "+strconv.Itoa(count)+" "+assets) - pdf.Text(175, y, "{data-risk-quick-wins}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - */ - - y += 6 - assets := "Assets" - count = len(model.OutOfScopeTechnicalAssets()) - if count == 1 { - assets = "Asset" - } - pdf.Text(11, y, " "+"Out-of-Scope Assets: "+strconv.Itoa(count)+" "+assets) - pdf.Text(175, y, "{out-of-scope-assets}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - modelFailures := model.FlattenRiskSlice(model.FilterByModelFailures(model.GeneratedRisksByCategory)) - risks = "Risks" - count = len(modelFailures) - if count == 1 { - risks = "Risk" - } - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(modelFailures)) - if countStillAtRisk > 0 { - colors.ColorModelFailure(pdf) - } - pdf.Text(11, y, " "+"Potential Model Failures: "+strconv.Itoa(countStillAtRisk)+" / "+strconv.Itoa(count)+" "+risks) - pdf.Text(175, y, "{model-failures}") - pdfColorBlack() - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - y += 6 - questions := "Questions" - count = len(model.ParsedModelRoot.Questions) - if count == 1 { - questions = "Question" - } - if model.QuestionsUnanswered() > 0 { - colors.ColorModelFailure(pdf) - } - pdf.Text(11, y, " "+"Questions: "+strconv.Itoa(model.QuestionsUnanswered())+" / "+strconv.Itoa(count)+" "+questions) - pdf.Text(175, y, "{questions}") - pdfColorBlack() - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - - // =============== - - if len(model.GeneratedRisksByCategory) > 0 { - y += 6 - y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to 
avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - pdf.Text(11, y, "Risks by Vulnerability Category") - pdf.SetFont("Helvetica", "", fontSizeBody) - y += 6 - pdf.Text(11, y, " "+"Identified Risks by Vulnerability Category") - pdf.Text(175, y, "{intro-risks-by-vulnerability-category}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - for _, category := range model.SortedRiskCategories() { - risks := model.SortedRisksOfCategory(category) - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - pdf.Text(11, y, " "+uni(category.Title)+": "+suffix) - pdf.Text(175, y, "{"+category.Id+"}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - tocLinkIdByAssetId[category.Id] = pdf.AddLink() - pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[category.Id]) - } - } - - // =============== - - if len(model.ParsedModelRoot.TechnicalAssets) > 0 { - y += 6 - y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - pdf.Text(11, y, "Risks by Technical Asset") - pdf.SetFont("Helvetica", "", fontSizeBody) - y += 6 - pdf.Text(11, y, " "+"Identified Risks by Technical Asset") - pdf.Text(175, 
y, "{intro-risks-by-technical-asset}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - for _, technicalAsset := range model.SortedTechnicalAssetsByRiskSeverityAndTitle() { - risks := technicalAsset.GeneratedRisks() - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - if technicalAsset.OutOfScope { - pdfColorOutOfScope() - suffix = "out-of-scope" - } else { - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - } - pdf.Text(11, y, " "+uni(technicalAsset.Title)+": "+suffix) - pdf.Text(175, y, "{"+technicalAsset.Id+"}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - tocLinkIdByAssetId[technicalAsset.Id] = pdf.AddLink() - pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[technicalAsset.Id]) - } - } - - // =============== - - if len(model.ParsedModelRoot.DataAssets) > 0 { - y += 6 - y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdfColorBlack() - pdf.Text(11, y, "Data Breach Probabilities by Data Asset") - pdf.SetFont("Helvetica", "", fontSizeBody) - y += 6 - pdf.Text(11, y, " "+"Identified Data Breach Probabilities by Data Asset") - pdf.Text(175, y, "{intro-risks-by-data-asset}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - for _, dataAsset := range 
model.SortedDataAssetsByDataBreachProbabilityAndTitle() { - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - risks := dataAsset.IdentifiedDataBreachProbabilityRisks() - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() { - case model.Probable: - colors.ColorHighRisk(pdf) - case model.Possible: - colors.ColorMediumRisk(pdf) - case model.Improbable: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if !dataAsset.IsDataBreachPotentialStillAtRisk() { - pdfColorBlack() - } - pdf.Text(11, y, " "+uni(dataAsset.Title)+": "+suffix) - pdf.Text(175, y, "{data:"+dataAsset.Id+"}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - tocLinkIdByAssetId[dataAsset.Id] = pdf.AddLink() - pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[dataAsset.Id]) - } - } - - // =============== - - if len(model.ParsedModelRoot.TrustBoundaries) > 0 { - y += 6 - y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdfColorBlack() - pdf.Text(11, y, "Trust Boundaries") - pdf.SetFont("Helvetica", "", fontSizeBody) - for _, key := range model.SortedKeysOfTrustBoundaries() { - trustBoundary := model.ParsedModelRoot.TrustBoundaries[key] - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - colors.ColorTwilight(pdf) - if !trustBoundary.Type.IsNetworkBoundary() { - pdfColorLightGray() - } - pdf.Text(11, y, " "+uni(trustBoundary.Title)) - pdf.Text(175, y, "{boundary:"+trustBoundary.Id+"}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - tocLinkIdByAssetId[trustBoundary.Id] = pdf.AddLink() - pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[trustBoundary.Id]) - } - pdfColorBlack() - } - - // =============== - - if len(model.ParsedModelRoot.SharedRuntimes) > 0 { - y += 6 - 
y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdfColorBlack() - pdf.Text(11, y, "Shared Runtime") - pdf.SetFont("Helvetica", "", fontSizeBody) - for _, key := range model.SortedKeysOfSharedRuntime() { - sharedRuntime := model.ParsedModelRoot.SharedRuntimes[key] - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - pdf.Text(11, y, " "+uni(sharedRuntime.Title)) - pdf.Text(175, y, "{runtime:"+sharedRuntime.Id+"}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - tocLinkIdByAssetId[sharedRuntime.Id] = pdf.AddLink() - pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[sharedRuntime.Id]) - } - } - - // =============== - - y += 6 - y += 6 - if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen" - pageBreakInLists() - y = 40 - } - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Text(11, y, "About Threagile") - pdf.SetFont("Helvetica", "", fontSizeBody) - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - pdf.Text(11, y, " "+"Risk Rules Checked by Threagile") - pdf.Text(175, y, "{risk-rules-checked}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - y += 6 - if y > 275 { - pageBreakInLists() - y = 40 - } - pdfColorDisclaimer() - pdf.Text(11, y, " "+"Disclaimer") - pdf.Text(175, y, "{disclaimer}") - pdf.Line(15.6, y+1.3, 11+171.5, y+1.3) - pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink()) - pdfColorBlack() - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) - - // Now write all the sections/pages. Before we start writing, we use `RegisterAlias` to - // ensure that the alias written in the table of contents will be replaced - // by the current page number. 
--> See the "pdf.RegisterAlias()" calls during the PDF creation in this file -} - -func defineLinkTarget(alias string) { - pageNumbStr := strconv.Itoa(pdf.PageNo()) - if len(pageNumbStr) == 1 { - pageNumbStr = " " + pageNumbStr - } else if len(pageNumbStr) == 2 { - pageNumbStr = " " + pageNumbStr - } - pdf.RegisterAlias(alias, pageNumbStr) - pdf.SetLink(linkCounter, 0, -1) - linkCounter++ -} - -func createDisclaimer() { - pdf.AddPage() - currentChapterTitleBreadcrumb = "Disclaimer" - defineLinkTarget("{disclaimer}") - gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300) - pdfColorDisclaimer() - pdf.SetFont("Helvetica", "B", fontSizeHeadline) - pdf.Text(11, 40, "Disclaimer") - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetY(46) - - var disclaimer strings.Builder - disclaimer.WriteString(model.ParsedModelRoot.Author.Name + " conducted this threat analysis using the open-source Threagile toolkit " + - "on the applications and systems that were modeled as of this report's date. " + - "Information security threats are continually changing, with new " + - "vulnerabilities discovered on a daily basis, and no application can ever be 100% secure no matter how much " + - "threat modeling is conducted. It is recommended to execute threat modeling and also penetration testing on a regular basis " + - "(for example yearly) to ensure a high ongoing level of security and constantly check for new attack vectors. " + - "

" + - "This report cannot and does not protect against personal or business loss as the result of use of the " + - "applications or systems described. " + model.ParsedModelRoot.Author.Name + " and the Threagile toolkit offers no warranties, representations or " + - "legal certifications concerning the applications or systems it tests. All software includes defects: nothing " + - "in this document is intended to represent or warrant that threat modeling was complete and without error, " + - "nor does this document represent or warrant that the architecture analyzed is suitable to task, free of other " + - "defects than reported, fully compliant with any industry standards, or fully compatible with any operating " + - "system, hardware, or other application. Threat modeling tries to analyze the modeled architecture without " + - "having access to a real working system and thus cannot and does not test the implementation for defects and vulnerabilities. " + - "These kinds of checks would only be possible with a separate code review and penetration test against " + - "a working system and not via a threat model." + - "

" + - "By using the resulting information you agree that " + model.ParsedModelRoot.Author.Name + " and the Threagile toolkit " + - "shall be held harmless in any event." + - "

" + - "This report is confidential and intended for internal, confidential use by the client. The recipient " + - "is obligated to ensure the highly confidential contents are kept secret. The recipient assumes responsibility " + - "for further distribution of this document." + - "

" + - "In this particular project, a timebox approach was used to define the analysis effort. This means that the " + - "author allotted a prearranged amount of time to identify and document threats. Because of this, there " + - "is no guarantee that all possible threats and risks are discovered. Furthermore, the analysis " + - "applies to a snapshot of the current state of the modeled architecture (based on the architecture information provided " + - "by the customer) at the examination time." + - "


" + - "Report Distribution" + - "

" + - "Distribution of this report (in full or in part like diagrams or risk findings) requires that this disclaimer " + - "as well as the chapter about the Threagile toolkit and method used is kept intact as part of the " + - "distributed report or referenced from the distributed parts.") - html := pdf.HTMLBasicNew() - html.Write(5, disclaimer.String()) - pdfColorBlack() -} - -func createManagementSummary() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - title := "Management Summary" - addHeadline(title, false) - defineLinkTarget("{management-summary}") - currentChapterTitleBreadcrumb = title - countCritical := len(model.FilteredByOnlyCriticalRisks()) - countHigh := len(model.FilteredByOnlyHighRisks()) - countElevated := len(model.FilteredByOnlyElevatedRisks()) - countMedium := len(model.FilteredByOnlyMediumRisks()) - countLow := len(model.FilteredByOnlyLowRisks()) - - countStatusUnchecked := len(model.FilteredByRiskTrackingUnchecked()) - countStatusInDiscussion := len(model.FilteredByRiskTrackingInDiscussion()) - countStatusAccepted := len(model.FilteredByRiskTrackingAccepted()) - countStatusInProgress := len(model.FilteredByRiskTrackingInProgress()) - countStatusMitigated := len(model.FilteredByRiskTrackingMitigated()) - countStatusFalsePositive := len(model.FilteredByRiskTrackingFalsePositive()) - - html := pdf.HTMLBasicNew() - html.Write(5, "Threagile toolkit was used to model the architecture of \""+uni(model.ParsedModelRoot.Title)+"\" "+ - "and derive risks by analyzing the components and data flows. The risks identified during this analysis are shown "+ - "in the following chapters. Identified risks during threat modeling do not necessarily mean that the "+ - "vulnerability associated with this risk actually exists: it is more to be seen as a list of potential risks and "+ - "threats, which should be individually reviewed and reduced by removing false positives. 
For the remaining risks it should "+ - "be checked in the design and implementation of \""+uni(model.ParsedModelRoot.Title)+"\" whether the mitigation advices "+ - "have been applied or not."+ - "

"+ - "Each risk finding references a chapter of the OWASP ASVS (Application Security Verification Standard) audit checklist. "+ - "The OWASP ASVS checklist should be considered as an inspiration by architects and developers to further harden "+ - "the application in a Defense-in-Depth approach. Additionally, for each risk finding a "+ - "link towards a matching OWASP Cheat Sheet or similar with technical details about how to implement a mitigation is given."+ - "

"+ - "In total "+strconv.Itoa(model.TotalRiskCount())+" initial risks in "+strconv.Itoa(len(model.GeneratedRisksByCategory))+" categories have "+ - "been identified during the threat modeling process:

") // TODO plural singular stuff risk/s category/ies has/have - - pdf.SetFont("Helvetica", "B", fontSizeBody) - - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "") - colors.ColorRiskStatusUnchecked(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "") - pdf.Ln(-1) - - colors.ColorCriticalRisk(pdf) - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "critical risk", "0", 0, "", false, 0, "") - colors.ColorRiskStatusInDiscussion(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "") - pdf.Ln(-1) - - colors.ColorHighRisk(pdf) - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "high risk", "0", 0, "", false, 0, "") - colors.ColorRiskStatusAccepted(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "") - pdf.Ln(-1) - - colors.ColorElevatedRisk(pdf) - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "elevated risk", "0", 0, "", false, 0, "") - colors.ColorRiskStatusInProgress(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "") - 
pdf.Ln(-1) - - colors.ColorMediumRisk(pdf) - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "medium risk", "0", 0, "", false, 0, "") - colors.ColorRiskStatusMitigated(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "") - pdf.SetFont("Helvetica", "BI", fontSizeBody) - pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "") - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(-1) - - colors.ColorLowRisk(pdf) - pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "low risk", "0", 0, "", false, 0, "") - colors.ColorRiskStatusFalsePositive(pdf) - pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "") - pdf.SetFont("Helvetica", "BI", fontSizeBody) - pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "") - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(-1) - - pdf.SetFont("Helvetica", "", fontSizeBody) - - // pie chart: risk severity - pieChartRiskSeverity := chart.PieChart{ - Width: 1500, - Height: 1500, - Values: []chart.Value{ - {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorLowRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorLowRisk()), - FontSize: 65}}, - {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorMediumRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorMediumRisk()), - FontSize: 65}}, - {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated", - Style: chart.Style{ - FillColor: 
makeColor(colors.RgbHexColorElevatedRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorElevatedRisk()), - FontSize: 65}}, - {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorHighRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorHighRisk()), - FontSize: 65}}, - {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorCriticalRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorCriticalRisk()), - FontSize: 65}}, - }, - } - - // pie chart: risk status - pieChartRiskStatus := chart.PieChart{ - Width: 1500, - Height: 1500, - Values: []chart.Value{ - {Value: float64(countStatusFalsePositive), //Label: strconv.Itoa(countStatusFalsePositive) + " False Positive", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()), - FontSize: 65}}, - {Value: float64(countStatusMitigated), //Label: strconv.Itoa(countStatusMitigated) + " Mitigated", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusMitigated()), - FontSize: 65}}, - {Value: float64(countStatusInProgress), //Label: strconv.Itoa(countStatusInProgress) + " InProgress", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusInProgress()), - FontSize: 65}}, - {Value: float64(countStatusAccepted), //Label: strconv.Itoa(countStatusAccepted) + " Accepted", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusAccepted()), - FontSize: 65}}, - {Value: float64(countStatusInDiscussion), //Label: 
strconv.Itoa(countStatusInDiscussion) + " InDiscussion", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()), - FontSize: 65}}, - {Value: float64(countStatusUnchecked), //Label: strconv.Itoa(countStatusUnchecked) + " Unchecked", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()), - FontSize: 65}}, - }, - } - - y := pdf.GetY() + 5 - embedPieChart(pieChartRiskSeverity, 15.0, y) - embedPieChart(pieChartRiskStatus, 110.0, y) - - // individual management summary comment - pdfColorBlack() - if len(model.ParsedModelRoot.ManagementSummaryComment) > 0 { - html.Write(5, "















"+ - model.ParsedModelRoot.ManagementSummaryComment) - } -} - -func createRiskMitigationStatus() { - pdf.SetTextColor(0, 0, 0) - stillAtRisk := model.FilteredByStillAtRisk() - count := len(stillAtRisk) - title := "Risk Mitigation" - addHeadline(title, false) - defineLinkTarget("{risk-mitigation-status}") - currentChapterTitleBreadcrumb = title - - html := pdf.HTMLBasicNew() - html.Write(5, "The following chart gives a high-level overview of the risk tracking status (including mitigated risks):") - - risksCritical := model.FilteredByOnlyCriticalRisks() - risksHigh := model.FilteredByOnlyHighRisks() - risksElevated := model.FilteredByOnlyElevatedRisks() - risksMedium := model.FilteredByOnlyMediumRisks() - risksLow := model.FilteredByOnlyLowRisks() - - countStatusUnchecked := len(model.FilteredByRiskTrackingUnchecked()) - countStatusInDiscussion := len(model.FilteredByRiskTrackingInDiscussion()) - countStatusAccepted := len(model.FilteredByRiskTrackingAccepted()) - countStatusInProgress := len(model.FilteredByRiskTrackingInProgress()) - countStatusMitigated := len(model.FilteredByRiskTrackingMitigated()) - countStatusFalsePositive := len(model.FilteredByRiskTrackingFalsePositive()) - - stackedBarChartRiskTracking := chart.StackedBarChart{ - Width: 4000, - //Height: 2500, - XAxis: chart.Style{Show: false, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom}, - YAxis: chart.Style{Show: true, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom}, - Bars: []chart.StackedBar{ - { - Name: model.LowSeverity.Title(), - Width: 130, - Values: []chart.Value{ - {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksLow))), Label: model.Unchecked.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksLow))), Label: model.InDiscussion.Title(), - Style: chart.Style{FillColor: 
makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksLow))), Label: model.Accepted.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksLow))), Label: model.InProgress.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksLow))), Label: model.Mitigated.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksLow))), Label: model.FalsePositive.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - }, - }, - { - Name: model.MediumSeverity.Title(), - Width: 130, - Values: []chart.Value{ - {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksMedium))), Label: model.Unchecked.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksMedium))), Label: model.InDiscussion.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksMedium))), Label: model.Accepted.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: 
float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksMedium))), Label: model.InProgress.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksMedium))), Label: model.Mitigated.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksMedium))), Label: model.FalsePositive.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - }, - }, - { - Name: model.ElevatedSeverity.Title(), - Width: 130, - Values: []chart.Value{ - {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksElevated))), Label: model.Unchecked.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksElevated))), Label: model.InDiscussion.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksElevated))), Label: model.Accepted.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksElevated))), Label: model.InProgress.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksElevated))), Label: model.Mitigated.Title(), - Style: 
chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksElevated))), Label: model.FalsePositive.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - }, - }, - { - Name: model.HighSeverity.Title(), - Width: 130, - Values: []chart.Value{ - {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksHigh))), Label: model.Unchecked.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksHigh))), Label: model.InDiscussion.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksHigh))), Label: model.Accepted.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksHigh))), Label: model.InProgress.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksHigh))), Label: model.Mitigated.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksHigh))), Label: model.FalsePositive.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: 
drawing.ColorFromHex("999")}}, - }, - }, - { - Name: model.CriticalSeverity.Title(), - Width: 130, - Values: []chart.Value{ - {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksCritical))), Label: model.Unchecked.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksCritical))), Label: model.InDiscussion.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksCritical))), Label: model.Accepted.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksCritical))), Label: model.InProgress.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksCritical))), Label: model.Mitigated.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksCritical))), Label: model.FalsePositive.Title(), - Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}}, - }, - }, - }, - } - - y := pdf.GetY() + 12 - embedStackedBarChart(stackedBarChartRiskTracking, 15.0, y) - - // draw the X-Axis legend on my own - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorBlack() - pdf.Text(24.02, 169, "Low ("+strconv.Itoa(len(risksLow))+")") - pdf.Text(46.10, 169, "Medium 
("+strconv.Itoa(len(risksMedium))+")") - pdf.Text(69.74, 169, "Elevated ("+strconv.Itoa(len(risksElevated))+")") - pdf.Text(97.95, 169, "High ("+strconv.Itoa(len(risksHigh))+")") - pdf.Text(121.65, 169, "Critical ("+strconv.Itoa(len(risksCritical))+")") - - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(20) - - colors.ColorRiskStatusUnchecked(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorRiskStatusInDiscussion(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorRiskStatusAccepted(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorRiskStatusInProgress(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorRiskStatusMitigated(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "") - pdf.SetFont("Helvetica", "BI", fontSizeBody) - pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "") - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(-1) - colors.ColorRiskStatusFalsePositive(pdf) - pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "") - pdf.SetFont("Helvetica", "BI", fontSizeBody) - pdf.CellFormat(60, 6, "false positive", "0", 0, "", 
false, 0, "") - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(-1) - - pdf.SetFont("Helvetica", "", fontSizeBody) - - pdfColorBlack() - if count == 0 { - html.Write(5, "














"+ - "After removal of risks with status mitigated and false positive "+ - ""+strconv.Itoa(count)+" remain unmitigated.") - } else { - html.Write(5, "














"+ - "After removal of risks with status mitigated and false positive "+ - "the following "+strconv.Itoa(count)+" remain unmitigated:") - - countCritical := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyCriticalRisks())) - countHigh := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyHighRisks())) - countElevated := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyElevatedRisks())) - countMedium := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyMediumRisks())) - countLow := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyLowRisks())) - - countBusinessSide := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyBusinessSide())) - countArchitecture := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyArchitecture())) - countDevelopment := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyDevelopment())) - countOperation := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyOperation())) - - pieChartRemainingRiskSeverity := chart.PieChart{ - Width: 1500, - Height: 1500, - Values: []chart.Value{ - {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorLowRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorLowRisk()), - FontSize: 65}}, - {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorMediumRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorMediumRisk()), - FontSize: 65}}, - {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorElevatedRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorElevatedRisk()), - FontSize: 65}}, - {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorHighRisk()).WithAlpha(98), - //FontColor: 
makeColor(colors.RgbHexColorHighRisk()), - FontSize: 65}}, - {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical", - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorCriticalRisk()).WithAlpha(98), - //FontColor: makeColor(colors.RgbHexColorCriticalRisk()), - FontSize: 65}}, - }, - } - - pieChartRemainingRisksByFunction := chart.PieChart{ - Width: 1500, - Height: 1500, - Values: []chart.Value{ - {Value: float64(countBusinessSide), - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorBusiness()).WithAlpha(98), - FontSize: 65}}, - {Value: float64(countArchitecture), - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorArchitecture()).WithAlpha(98), - FontSize: 65}}, - {Value: float64(countDevelopment), - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorDevelopment()).WithAlpha(98), - FontSize: 65}}, - {Value: float64(countOperation), - Style: chart.Style{ - FillColor: makeColor(colors.RgbHexColorOperation()).WithAlpha(98), - FontSize: 65}}, - }, - } - - embedPieChart(pieChartRemainingRiskSeverity, 15.0, 216) - embedPieChart(pieChartRemainingRisksByFunction, 110.0, 216) - - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.Ln(8) - - colors.ColorCriticalRisk(pdf) - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unmitigated critical risk", "0", 0, "", false, 0, "") - pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, "", "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorHighRisk(pdf) - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unmitigated high risk", "0", 0, "", false, 0, "") - colors.ColorBusiness(pdf) - pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, 
strconv.Itoa(countBusinessSide), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "business side related", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorElevatedRisk(pdf) - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unmitigated elevated risk", "0", 0, "", false, 0, "") - colors.ColorArchitecture(pdf) - pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countArchitecture), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "architecture related", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorMediumRisk(pdf) - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unmitigated medium risk", "0", 0, "", false, 0, "") - colors.ColorDevelopment(pdf) - pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countDevelopment), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "development related", "0", 0, "", false, 0, "") - pdf.Ln(-1) - colors.ColorLowRisk(pdf) - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "unmitigated low risk", "0", 0, "", false, 0, "") - colors.ColorOperation(pdf) - pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(10, 6, strconv.Itoa(countOperation), "0", 0, "R", false, 0, "") - pdf.CellFormat(60, 6, "operations related", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - } -} - -// CAUTION: Long labels might cause endless loop, then remove labels and render them manually later inside the PDF -func embedStackedBarChart(sbcChart chart.StackedBarChart, x float64, y float64) { - tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "chart-*-.png") - checkErr(err) - defer os.Remove(tmpFilePNG.Name()) 
- file, _ := os.Create(tmpFilePNG.Name()) - defer file.Close() - err = sbcChart.Render(chart.PNG, file) - checkErr(err) - var options gofpdf.ImageOptions - options.ImageType = "" - pdf.RegisterImage(tmpFilePNG.Name(), "") - pdf.ImageOptions(tmpFilePNG.Name(), x, y, 0, 110, false, options, 0, "") -} - -func embedPieChart(pieChart chart.PieChart, x float64, y float64) { - tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "chart-*-.png") - checkErr(err) - defer os.Remove(tmpFilePNG.Name()) - file, err := os.Create(tmpFilePNG.Name()) - checkErr(err) - defer file.Close() - err = pieChart.Render(chart.PNG, file) - checkErr(err) - var options gofpdf.ImageOptions - options.ImageType = "" - pdf.RegisterImage(tmpFilePNG.Name(), "") - pdf.ImageOptions(tmpFilePNG.Name(), x, y, 60, 0, false, options, 0, "") -} - -func makeColor(hexColor string) drawing.Color { - _, i := utf8.DecodeRuneInString(hexColor) - return drawing.ColorFromHex(hexColor[i:]) // = remove first char, which is # in rgb hex here -} - -func createImpactInitialRisks() { - renderImpactAnalysis(true) -} - -func createImpactRemainingRisks() { - renderImpactAnalysis(false) -} - -func renderImpactAnalysis(initialRisks bool) { - pdf.SetTextColor(0, 0, 0) - count, catCount := model.TotalRiskCount(), len(model.GeneratedRisksByCategory) - if !initialRisks { - count, catCount = len(model.FilteredByStillAtRisk()), len(model.CategoriesOfOnlyRisksStillAtRisk(model.GeneratedRisksByCategory)) - } - riskStr, catStr := "Risks", "Categories" - if count == 1 { - riskStr = "Risk" - } - if catCount == 1 { - catStr = "Category" - } - if initialRisks { - chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Initial " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr - addHeadline(chapTitle, false) - defineLinkTarget("{impact-analysis-initial-risks}") - currentChapterTitleBreadcrumb = chapTitle - } else { - chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Remaining " + riskStr + " in " + 
strconv.Itoa(catCount) + " " + catStr - addHeadline(chapTitle, false) - defineLinkTarget("{impact-analysis-remaining-risks}") - currentChapterTitleBreadcrumb = chapTitle - } - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - riskStr = "risks" - if count == 1 { - riskStr = "risk" - } - initialStr := "initial" - if !initialRisks { - initialStr = "remaining" - } - strBuilder.WriteString("The most prevalent impacts of the " + strconv.Itoa(count) + " " + - initialStr + " " + riskStr + " (distributed over " + strconv.Itoa(catCount) + " risk categories) are " + - "(taking the severity ratings into account and using the highest for each category):
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - addCategories(model.CategoriesOfOnlyCriticalRisks(model.GeneratedRisksByCategory, initialRisks), - model.CriticalSeverity, false, initialRisks, true, false) - addCategories(model.CategoriesOfOnlyHighRisks(model.GeneratedRisksByCategory, initialRisks), - model.HighSeverity, false, initialRisks, true, false) - addCategories(model.CategoriesOfOnlyElevatedRisks(model.GeneratedRisksByCategory, initialRisks), - model.ElevatedSeverity, false, initialRisks, true, false) - addCategories(model.CategoriesOfOnlyMediumRisks(model.GeneratedRisksByCategory, initialRisks), - model.MediumSeverity, false, initialRisks, true, false) - addCategories(model.CategoriesOfOnlyLowRisks(model.GeneratedRisksByCategory, initialRisks), - model.LowSeverity, false, initialRisks, true, false) - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -func createOutOfScopeAssets() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - assets := "Assets" - count := len(model.OutOfScopeTechnicalAssets()) - if count == 1 { - assets = "Asset" - } - chapTitle := "Out-of-Scope Assets: " + strconv.Itoa(count) + " " + assets - addHeadline(chapTitle, false) - defineLinkTarget("{out-of-scope-assets}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - strBuilder.WriteString("This chapter lists all technical assets that have been defined as out-of-scope. " + - "Each one should be checked in the model whether it should better be included in the " + - "overall risk analysis:
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - outOfScopeAssetCount := 0 - for _, technicalAsset := range model.SortedTechnicalAssetsByRAAAndTitle() { - if technicalAsset.OutOfScope { - outOfScopeAssetCount++ - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - strBuilder.WriteString("

") - } - html.Write(5, strBuilder.String()) - strBuilder.Reset() - posY := pdf.GetY() - pdfColorOutOfScope() - strBuilder.WriteString("") - strBuilder.WriteString(uni(technicalAsset.Title)) - strBuilder.WriteString("") - strBuilder.WriteString(": out-of-scope") - strBuilder.WriteString("
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetTextColor(0, 0, 0) - strBuilder.WriteString(uni(technicalAsset.JustificationOutOfScope)) - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id]) - } - } - - if outOfScopeAssetCount == 0 { - pdfColorGray() - html.Write(5, "

No technical assets have been defined as out-of-scope.") - } - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -func createModelFailures() { - pdf.SetTextColor(0, 0, 0) - modelFailures := model.FlattenRiskSlice(model.FilterByModelFailures(model.GeneratedRisksByCategory)) - risks := "Risks" - count := len(modelFailures) - if count == 1 { - risks = "Risk" - } - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(modelFailures)) - if countStillAtRisk > 0 { - colors.ColorModelFailure(pdf) - } - chapTitle := "Potential Model Failures: " + strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(count) + " " + risks - addHeadline(chapTitle, false) - defineLinkTarget("{model-failures}") - currentChapterTitleBreadcrumb = chapTitle - pdfColorBlack() - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - strBuilder.WriteString("This chapter lists potential model failures where not all relevant assets have been " + - "modeled or the model might itself contain inconsistencies. Each potential model failure should be checked " + - "in the model against the architecture design:
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - modelFailuresByCategory := model.FilterByModelFailures(model.GeneratedRisksByCategory) - if len(modelFailuresByCategory) == 0 { - pdfColorGray() - html.Write(5, "

No potential model failures have been identified.") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(modelFailuresByCategory, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(modelFailuresByCategory, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(modelFailuresByCategory, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(modelFailuresByCategory, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(modelFailuresByCategory, true), - model.LowSeverity, true, true, false, true) - } - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -func createRAA(introTextRAA string) { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - chapTitle := "RAA Analysis" - addHeadline(chapTitle, false) - defineLinkTarget("{raa-analysis}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - strBuilder.WriteString(introTextRAA) - strBuilder.WriteString("
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - for _, technicalAsset := range model.SortedTechnicalAssetsByRAAAndTitle() { - if technicalAsset.OutOfScope { - continue - } - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - strBuilder.WriteString("

") - } - risks := technicalAsset.GeneratedRisks() - switch model.HighestSeverityStillAtRisk(risks) { - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - - html.Write(5, strBuilder.String()) - strBuilder.Reset() - posY := pdf.GetY() - strBuilder.WriteString("") - strBuilder.WriteString(uni(technicalAsset.Title)) - strBuilder.WriteString("") - if technicalAsset.OutOfScope { - strBuilder.WriteString(": out-of-scope") - } else { - strBuilder.WriteString(": RAA ") - strBuilder.WriteString(fmt.Sprintf("%.0f", technicalAsset.RAA)) - strBuilder.WriteString("%") - } - strBuilder.WriteString("
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetTextColor(0, 0, 0) - strBuilder.WriteString(uni(technicalAsset.Description)) - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id]) - } - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -/* -func createDataRiskQuickWins() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - assets := "assets" - count := len(model.SortedTechnicalAssetsByQuickWinsAndTitle()) - if count == 1 { - assets = "asset" - } - chapTitle := "Data Risk Quick Wins: " + strconv.Itoa(count) + " " + assets - addHeadline(chapTitle, false) - defineLinkTarget("{data-risk-quick-wins}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - strBuilder.WriteString("For each technical asset it was checked how many data assets at risk might " + - "get their risk-rating reduced (partly or fully) when the risks of the technical asset are mitigated. " + - "In general, that means the higher the quick win value is, the more data assets (left side of the Data Risk Mapping diagram) " + - "turn from red to amber or from amber to blue by mitigating the technical asset's risks. " + - "This list can be used to prioritize on efforts with the greatest effects of reducing data asset risks:
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - for _, technicalAsset := range model.SortedTechnicalAssetsByQuickWinsAndTitle() { - quickWins := technicalAsset.QuickWins() - if pdf.GetY() > 260 { - pageBreak() - pdf.SetY(36) - } else { - strBuilder.WriteString("

") - } - risks := technicalAsset.GeneratedRisks() - switch model.HighestSeverityStillAtRisk(risks) { - case model.High: - colors.ColorHighRisk(pdf) - case model.Medium: - colors.ColorMediumRisk(pdf) - case model.Low: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - - html.Write(5, strBuilder.String()) - strBuilder.Reset() - posY := pdf.GetY() - strBuilder.WriteString("") - strBuilder.WriteString(uni(technicalAsset.Title)) - strBuilder.WriteString("") - strBuilder.WriteString(": ") - strBuilder.WriteString(fmt.Sprintf("%.2f", quickWins)) - strBuilder.WriteString(" Quick Wins") - strBuilder.WriteString("
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetTextColor(0, 0, 0) - strBuilder.WriteString(uni(technicalAsset.Description)) - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id]) - } - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} -*/ - -func addCategories(riskCategories []model.RiskCategory, severity model.RiskSeverity, bothInitialAndRemainingRisks bool, initialRisks bool, describeImpact bool, describeDescription bool) { - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - sort.Sort(model.ByRiskCategoryTitleSort(riskCategories)) - for _, riskCategory := range riskCategories { - risks := model.GeneratedRisksByCategory[riskCategory] - if !initialRisks { - risks = model.ReduceToOnlyStillAtRisk(risks) - } - if len(risks) == 0 { - continue - } - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - strBuilder.WriteString("

") - } - var prefix string - switch severity { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - prefix = "Critical: " - case model.HighSeverity: - colors.ColorHighRisk(pdf) - prefix = "High: " - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - prefix = "Elevated: " - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - prefix = "Medium: " - case model.LowSeverity: - colors.ColorLowRisk(pdf) - prefix = "Low: " - default: - pdfColorBlack() - prefix = "" - } - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - html.Write(5, strBuilder.String()) - strBuilder.Reset() - posY := pdf.GetY() - strBuilder.WriteString(prefix) - strBuilder.WriteString("") - strBuilder.WriteString(riskCategory.Title) - strBuilder.WriteString(": ") - count := len(risks) - initialStr := "Initial" - if !initialRisks { - initialStr = "Remaining" - } - remainingRisks := model.ReduceToOnlyStillAtRisk(risks) - suffix := strconv.Itoa(count) + " " + initialStr + " Risk" - if bothInitialAndRemainingRisks { - suffix = strconv.Itoa(len(remainingRisks)) + " / " + strconv.Itoa(count) + " Risk" - } - if count != 1 { - suffix += "s" - } - suffix += " - Exploitation likelihood is " - if initialRisks { - suffix += model.HighestExploitationLikelihood(risks).Title() + " with " + model.HighestExploitationImpact(risks).Title() + " impact." - } else { - suffix += model.HighestExploitationLikelihood(remainingRisks).Title() + " with " + model.HighestExploitationImpact(remainingRisks).Title() + " impact." - } - strBuilder.WriteString(suffix + "
") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.SetTextColor(0, 0, 0) - if describeImpact { - strBuilder.WriteString(firstParagraph(riskCategory.Impact)) - } else if describeDescription { - strBuilder.WriteString(firstParagraph(riskCategory.Description)) - } else { - strBuilder.WriteString(firstParagraph(riskCategory.Mitigation)) - } - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[riskCategory.Id]) - } -} - -func firstParagraph(text string) string { - match := firstParagraphRegEx.FindStringSubmatch(text) - if len(match) == 0 { - return text - } - return match[1] -} - -func createAssignmentByFunction() { - pdf.SetTextColor(0, 0, 0) - title := "Assignment by Function" - addHeadline(title, false) - defineLinkTarget("{function-assignment}") - currentChapterTitleBreadcrumb = title - - risksBusinessSideFunction := model.RisksOfOnlyBusinessSide(model.GeneratedRisksByCategory) - risksArchitectureFunction := model.RisksOfOnlyArchitecture(model.GeneratedRisksByCategory) - risksDevelopmentFunction := model.RisksOfOnlyDevelopment(model.GeneratedRisksByCategory) - risksOperationFunction := model.RisksOfOnlyOperation(model.GeneratedRisksByCategory) - - countBusinessSideFunction := model.CountRisks(risksBusinessSideFunction) - countArchitectureFunction := model.CountRisks(risksArchitectureFunction) - countDevelopmentFunction := model.CountRisks(risksDevelopmentFunction) - countOperationFunction := model.CountRisks(risksOperationFunction) - var intro strings.Builder - intro.WriteString("This chapter clusters and assigns the risks by functions which are most likely able to " + - "check and mitigate them: " + - "In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " + - "of which " + strconv.Itoa(countBusinessSideFunction) + " should be checked by " + model.BusinessSide.Title() + ", " + - "" + 
strconv.Itoa(countArchitectureFunction) + " should be checked by " + model.Architecture.Title() + ", " + - "" + strconv.Itoa(countDevelopmentFunction) + " should be checked by " + model.Development.Title() + ", " + - "and " + strconv.Itoa(countOperationFunction) + " should be checked by " + model.Operations.Title() + ".
") - html := pdf.HTMLBasicNew() - html.Write(5, intro.String()) - intro.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - oldLeft, _, _, _ := pdf.GetMargins() - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.BusinessSide.Title()+"") - pdf.SetLeftMargin(15) - if len(risksBusinessSideFunction) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksBusinessSideFunction, true), - model.CriticalSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyHighRisks(risksBusinessSideFunction, true), - model.HighSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksBusinessSideFunction, true), - model.ElevatedSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyMediumRisks(risksBusinessSideFunction, true), - model.MediumSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyLowRisks(risksBusinessSideFunction, true), - model.LowSeverity, true, true, false, false) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Architecture.Title()+"") - pdf.SetLeftMargin(15) - if len(risksArchitectureFunction) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksArchitectureFunction, true), - model.CriticalSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyHighRisks(risksArchitectureFunction, true), - model.HighSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksArchitectureFunction, true), - model.ElevatedSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyMediumRisks(risksArchitectureFunction, true), - model.MediumSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyLowRisks(risksArchitectureFunction, true), - model.LowSeverity, true, true, false, false) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Development.Title()+"") - pdf.SetLeftMargin(15) - if len(risksDevelopmentFunction) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksDevelopmentFunction, true), - model.CriticalSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyHighRisks(risksDevelopmentFunction, true), - model.HighSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksDevelopmentFunction, true), - model.ElevatedSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyMediumRisks(risksDevelopmentFunction, true), - model.MediumSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyLowRisks(risksDevelopmentFunction, true), - model.LowSeverity, true, true, false, false) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Operations.Title()+"") - pdf.SetLeftMargin(15) - if len(risksOperationFunction) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksOperationFunction, true), - model.CriticalSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyHighRisks(risksOperationFunction, true), - model.HighSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksOperationFunction, true), - model.ElevatedSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyMediumRisks(risksOperationFunction, true), - model.MediumSeverity, true, true, false, false) - addCategories(model.CategoriesOfOnlyLowRisks(risksOperationFunction, true), - model.LowSeverity, true, true, false, false) - } - pdf.SetLeftMargin(oldLeft) - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -func createSTRIDE() { - pdf.SetTextColor(0, 0, 0) - title := "STRIDE Classification of Identified Risks" - addHeadline(title, false) - defineLinkTarget("{stride}") - currentChapterTitleBreadcrumb = title - - risksSTRIDESpoofing := model.RisksOfOnlySTRIDESpoofing(model.GeneratedRisksByCategory) - risksSTRIDETampering := model.RisksOfOnlySTRIDETampering(model.GeneratedRisksByCategory) - risksSTRIDERepudiation := model.RisksOfOnlySTRIDERepudiation(model.GeneratedRisksByCategory) - risksSTRIDEInformationDisclosure := model.RisksOfOnlySTRIDEInformationDisclosure(model.GeneratedRisksByCategory) - risksSTRIDEDenialOfService := model.RisksOfOnlySTRIDEDenialOfService(model.GeneratedRisksByCategory) - risksSTRIDEElevationOfPrivilege := model.RisksOfOnlySTRIDEElevationOfPrivilege(model.GeneratedRisksByCategory) - - countSTRIDESpoofing := model.CountRisks(risksSTRIDESpoofing) - countSTRIDETampering := model.CountRisks(risksSTRIDETampering) - countSTRIDERepudiation := model.CountRisks(risksSTRIDERepudiation) - countSTRIDEInformationDisclosure := model.CountRisks(risksSTRIDEInformationDisclosure) - countSTRIDEDenialOfService := model.CountRisks(risksSTRIDEDenialOfService) - countSTRIDEElevationOfPrivilege := 
model.CountRisks(risksSTRIDEElevationOfPrivilege) - var intro strings.Builder - intro.WriteString("This chapter clusters and classifies the risks by STRIDE categories: " + - "In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " + - "of which " + strconv.Itoa(countSTRIDESpoofing) + " in the " + model.Spoofing.Title() + " category, " + - "" + strconv.Itoa(countSTRIDETampering) + " in the " + model.Tampering.Title() + " category, " + - "" + strconv.Itoa(countSTRIDERepudiation) + " in the " + model.Repudiation.Title() + " category, " + - "" + strconv.Itoa(countSTRIDEInformationDisclosure) + " in the " + model.InformationDisclosure.Title() + " category, " + - "" + strconv.Itoa(countSTRIDEDenialOfService) + " in the " + model.DenialOfService.Title() + " category, " + - "and " + strconv.Itoa(countSTRIDEElevationOfPrivilege) + " in the " + model.ElevationOfPrivilege.Title() + " category.
") - html := pdf.HTMLBasicNew() - html.Write(5, intro.String()) - intro.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - - oldLeft, _, _, _ := pdf.GetMargins() - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Spoofing.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDESpoofing) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDESpoofing, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDESpoofing, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDESpoofing, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDESpoofing, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDESpoofing, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Tampering.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDETampering) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDETampering, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDETampering, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDETampering, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDETampering, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDETampering, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.Repudiation.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDERepudiation) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDERepudiation, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDERepudiation, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDERepudiation, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDERepudiation, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDERepudiation, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.InformationDisclosure.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDEInformationDisclosure) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEInformationDisclosure, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEInformationDisclosure, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEInformationDisclosure, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEInformationDisclosure, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEInformationDisclosure, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.DenialOfService.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDEDenialOfService) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEDenialOfService, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEDenialOfService, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEDenialOfService, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEDenialOfService, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEDenialOfService, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetTextColor(0, 0, 0) - html.Write(5, ""+model.ElevationOfPrivilege.Title()+"") - pdf.SetLeftMargin(15) - if len(risksSTRIDEElevationOfPrivilege) == 0 { - pdf.SetTextColor(150, 150, 150) - html.Write(5, "

n/a") - } else { - addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEElevationOfPrivilege, true), - model.CriticalSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEElevationOfPrivilege, true), - model.HighSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEElevationOfPrivilege, true), - model.ElevatedSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEElevationOfPrivilege, true), - model.MediumSeverity, true, true, false, true) - addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEElevationOfPrivilege, true), - model.LowSeverity, true, true, false, true) - } - pdf.SetLeftMargin(oldLeft) - - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) -} - -func createSecurityRequirements() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - chapTitle := "Security Requirements" - addHeadline(chapTitle, false) - defineLinkTarget("{security-requirements}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - html.Write(5, "This chapter lists the custom security requirements which have been defined for the modeled target.") - pdfColorBlack() - for _, title := range model.SortedKeysOfSecurityRequirements() { - description := model.ParsedModelRoot.SecurityRequirements[title] - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - html.Write(5, ""+uni(title)+"
") - html.Write(5, uni(description)) - } - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - html.Write(5, "This list is not complete and regulatory or law relevant security requirements have to be "+ - "taken into account as well. Also custom individual security requirements might exist for the project.") -} - -func createAbuseCases() { - pdf.SetTextColor(0, 0, 0) - chapTitle := "Abuse Cases" - addHeadline(chapTitle, false) - defineLinkTarget("{abuse-cases}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - html.Write(5, "This chapter lists the custom abuse cases which have been defined for the modeled target.") - pdfColorBlack() - for _, title := range model.SortedKeysOfAbuseCases() { - description := model.ParsedModelRoot.AbuseCases[title] - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - html.Write(5, ""+title+"
") - html.Write(5, description) - } - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - html.Write(5, "This list is not complete and regulatory or law relevant abuse cases have to be "+ - "taken into account as well. Also custom individual abuse cases might exist for the project.") -} - -func createQuestions() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - questions := "Questions" - count := len(model.ParsedModelRoot.Questions) - if count == 1 { - questions = "Question" - } - if model.QuestionsUnanswered() > 0 { - colors.ColorModelFailure(pdf) - } - chapTitle := "Questions: " + strconv.Itoa(model.QuestionsUnanswered()) + " / " + strconv.Itoa(count) + " " + questions - addHeadline(chapTitle, false) - defineLinkTarget("{questions}") - currentChapterTitleBreadcrumb = chapTitle - pdfColorBlack() - - html := pdf.HTMLBasicNew() - html.Write(5, "This chapter lists custom questions that arose during the threat modeling process.") - - if len(model.ParsedModelRoot.Questions) == 0 { - pdfColorLightGray() - html.Write(5, "


") - html.Write(5, "No custom questions arose during the threat modeling process.") - } - pdfColorBlack() - for _, question := range model.SortedKeysOfQuestions() { - answer := model.ParsedModelRoot.Questions[question] - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdfColorBlack() - if len(strings.TrimSpace(answer)) > 0 { - html.Write(5, ""+uni(question)+"
") - html.Write(5, ""+uni(strings.TrimSpace(answer))+"") - } else { - colors.ColorModelFailure(pdf) - html.Write(5, ""+uni(question)+"
") - pdfColorLightGray() - html.Write(5, "- answer pending -") - pdfColorBlack() - } - } -} - -func createTagListing() { - pdf.SetTextColor(0, 0, 0) - chapTitle := "Tag Listing" - addHeadline(chapTitle, false) - defineLinkTarget("{tag-listing}") - currentChapterTitleBreadcrumb = chapTitle - - html := pdf.HTMLBasicNew() - html.Write(5, "This chapter lists what tags are used by which elements.") - pdfColorBlack() - sorted := model.ParsedModelRoot.TagsAvailable - sort.Strings(sorted) - for _, tag := range sorted { - description := "" // TODO: add some separation texts to distinguish between technical assets and data assets etc. for example? - for _, techAsset := range model.SortedTechnicalAssetsByTitle() { - if model.Contains(techAsset.Tags, tag) { - if len(description) > 0 { - description += ", " - } - description += techAsset.Title - } - for _, commLink := range techAsset.CommunicationLinksSorted() { - if model.Contains(commLink.Tags, tag) { - if len(description) > 0 { - description += ", " - } - description += commLink.Title - } - } - } - for _, dataAsset := range model.SortedDataAssetsByTitle() { - if model.Contains(dataAsset.Tags, tag) { - if len(description) > 0 { - description += ", " - } - description += dataAsset.Title - } - } - for _, trustBoundary := range model.SortedTrustBoundariesByTitle() { - if model.Contains(trustBoundary.Tags, tag) { - if len(description) > 0 { - description += ", " - } - description += trustBoundary.Title - } - } - for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() { - if model.Contains(sharedRuntime.Tags, tag) { - if len(description) > 0 { - description += ", " - } - description += sharedRuntime.Title - } - } - if len(description) > 0 { - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdfColorBlack() - html.Write(5, ""+tag+"
") - html.Write(5, description) - } - } -} - -func createRiskCategories() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - // category title - title := "Identified Risks by Vulnerability Category" - pdfColorBlack() - addHeadline(title, false) - defineLinkTarget("{intro-risks-by-vulnerability-category}") - html := pdf.HTMLBasicNew() - var text strings.Builder - text.WriteString("In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " + - "of which " + - "" + strconv.Itoa(len(model.FilteredByOnlyCriticalRisks())) + " are rated as critical, " + - "" + strconv.Itoa(len(model.FilteredByOnlyHighRisks())) + " as high, " + - "" + strconv.Itoa(len(model.FilteredByOnlyElevatedRisks())) + " as elevated, " + - "" + strconv.Itoa(len(model.FilteredByOnlyMediumRisks())) + " as medium, " + - "and " + strconv.Itoa(len(model.FilteredByOnlyLowRisks())) + " as low. " + - "

These risks are distributed across " + strconv.Itoa(len(model.GeneratedRisksByCategory)) + " vulnerability categories. ") - text.WriteString("The following sub-chapters of this section describe each identified risk category.") // TODO more explanation text - html.Write(5, text.String()) - text.Reset() - currentChapterTitleBreadcrumb = title - for _, category := range model.SortedRiskCategories() { - risks := model.SortedRisksOfCategory(category) - - // category color - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - - // category title - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - title := category.Title + ": " + suffix - addHeadline(uni(title), true) - pdfColorBlack() - defineLinkTarget("{" + category.Id + "}") - currentChapterTitleBreadcrumb = title - - // category details - var text strings.Builder - cweLink := "n/a" - if category.CWE > 0 { - cweLink = "CWE " + - strconv.Itoa(category.CWE) + "" - } - text.WriteString("Description (" + category.STRIDE.Title() + "): " + cweLink + "

") - text.WriteString(category.Description) - text.WriteString("


Impact

") - text.WriteString(category.Impact) - text.WriteString("


Detection Logic

") - text.WriteString(category.DetectionLogic) - text.WriteString("


Risk Rating

") - text.WriteString(category.RiskAssessment) - html.Write(5, text.String()) - text.Reset() - colors.ColorRiskStatusFalsePositive(pdf) - text.WriteString("


False Positives

") - text.WriteString(category.FalsePositives) - html.Write(5, text.String()) - text.Reset() - colors.ColorRiskStatusMitigated(pdf) - text.WriteString("


Mitigation (" + category.Function.Title() + "): " + category.Action + "

") - text.WriteString(category.Mitigation) - - asvsChapter := category.ASVS - if len(asvsChapter) == 0 { - text.WriteString("

ASVS Chapter: n/a") - } else { - text.WriteString("

ASVS Chapter: " + asvsChapter + "") - } - - cheatSheetLink := category.CheatSheet - if len(cheatSheetLink) == 0 { - cheatSheetLink = "n/a" - } else { - lastLinkParts := strings.Split(cheatSheetLink, "/") - linkText := lastLinkParts[len(lastLinkParts)-1] - if strings.HasSuffix(linkText, ".html") || strings.HasSuffix(linkText, ".htm") { - var extension = filepath.Ext(linkText) - linkText = linkText[0 : len(linkText)-len(extension)] - } - cheatSheetLink = "" + linkText + "" - } - text.WriteString("
Cheat Sheet: " + cheatSheetLink) - - text.WriteString("


Check

") - text.WriteString(category.Check) - - html.Write(5, text.String()) - text.Reset() - pdf.SetTextColor(0, 0, 0) - - // risk details - pageBreak() - pdf.SetY(36) - text.WriteString("Risk Findings

") - times := strconv.Itoa(len(risks)) + " time" - if len(risks) > 1 { - times += "s" - } - text.WriteString("The risk " + category.Title + " was found " + times + " in the analyzed architecture to be " + - "potentially possible. Each spot should be checked individually by reviewing the implementation whether all " + - "controls have been applied properly in order to mitigate each risk.
") - html.Write(5, text.String()) - text.Reset() - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.
") - pdf.SetFont("Helvetica", "", fontSizeBody) - oldLeft, _, _, _ := pdf.GetMargins() - headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false - for _, risk := range risks { - text.WriteString("
") - html.Write(5, text.String()) - text.Reset() - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } - switch risk.Severity { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - if !headlineCriticalWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft) - text.WriteString("
Critical Risk Severity

") - html.Write(5, text.String()) - text.Reset() - headlineCriticalWritten = true - } - case model.HighSeverity: - colors.ColorHighRisk(pdf) - if !headlineHighWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft) - text.WriteString("
High Risk Severity

") - html.Write(5, text.String()) - text.Reset() - headlineHighWritten = true - } - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - if !headlineElevatedWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft) - text.WriteString("
Elevated Risk Severity

") - html.Write(5, text.String()) - text.Reset() - headlineElevatedWritten = true - } - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - if !headlineMediumWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft) - text.WriteString("
Medium Risk Severity

") - html.Write(5, text.String()) - text.Reset() - headlineMediumWritten = true - } - case model.LowSeverity: - colors.ColorLowRisk(pdf) - if !headlineLowWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft) - text.WriteString("
Low Risk Severity

") - html.Write(5, text.String()) - text.Reset() - headlineLowWritten = true - } - default: - pdfColorBlack() - } - if !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - pdfColorBlack() - } - posY := pdf.GetY() - pdf.SetLeftMargin(oldLeft + 10) - pdf.SetFont("Helvetica", "", fontSizeBody) - text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.") - text.WriteString("
") - html.Write(5, text.String()) - text.Reset() - pdfColorGray() - pdf.SetFont("Helvetica", "", fontSizeVerySmall) - pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - if len(risk.MostRelevantSharedRuntimeId) > 0 { - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantSharedRuntimeId]) - } else if len(risk.MostRelevantTrustBoundaryId) > 0 { - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantTrustBoundaryId]) - } else if len(risk.MostRelevantTechnicalAssetId) > 0 { - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantTechnicalAssetId]) - } - writeRiskTrackingStatus(risk) - pdf.SetLeftMargin(oldLeft) - html.Write(5, text.String()) - text.Reset() - } - pdf.SetLeftMargin(oldLeft) - } -} - -func writeRiskTrackingStatus(risk model.Risk) { - uni := pdf.UnicodeTranslatorFromDescriptor("") - tracking := risk.GetRiskTracking() - pdfColorBlack() - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - switch tracking.Status { - case model.Unchecked: - colors.ColorRiskStatusUnchecked(pdf) - case model.InDiscussion: - colors.ColorRiskStatusInDiscussion(pdf) - case model.Accepted: - colors.ColorRiskStatusAccepted(pdf) - case model.InProgress: - colors.ColorRiskStatusInProgress(pdf) - case model.Mitigated: - colors.ColorRiskStatusMitigated(pdf) - case model.FalsePositive: - colors.ColorRiskStatusFalsePositive(pdf) - default: - pdfColorBlack() - } - pdf.SetFont("Helvetica", "", fontSizeSmall) - if tracking.Status == model.Unchecked { - pdf.SetFont("Helvetica", "B", fontSizeSmall) - } - pdf.CellFormat(25, 4, tracking.Status.Title(), "0", 0, "B", false, 0, "") - if tracking.Status != model.Unchecked { - dateStr := tracking.Date.Format("2006-01-02") - if dateStr == "0001-01-01" { - dateStr = "" - } - justificationStr := tracking.Justification - pdfColorGray() - pdf.CellFormat(20, 4, dateStr, "0", 0, "B", false, 0, "") - pdf.CellFormat(35, 4, 
uni(tracking.CheckedBy), "0", 0, "B", false, 0, "") - pdf.CellFormat(35, 4, uni(tracking.Ticket), "0", 0, "B", false, 0, "") - pdf.Ln(-1) - pdfColorBlack() - pdf.CellFormat(10, 4, "", "0", 0, "", false, 0, "") - pdf.MultiCell(170, 4, uni(justificationStr), "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - } else { - pdf.Ln(-1) - } - pdfColorBlack() -} - -func createTechnicalAssets() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - // category title - title := "Identified Risks by Technical Asset" - pdfColorBlack() - addHeadline(title, false) - defineLinkTarget("{intro-risks-by-technical-asset}") - html := pdf.HTMLBasicNew() - var text strings.Builder - text.WriteString("In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " + - "of which " + - "" + strconv.Itoa(len(model.FilteredByOnlyCriticalRisks())) + " are rated as critical, " + - "" + strconv.Itoa(len(model.FilteredByOnlyHighRisks())) + " as high, " + - "" + strconv.Itoa(len(model.FilteredByOnlyElevatedRisks())) + " as elevated, " + - "" + strconv.Itoa(len(model.FilteredByOnlyMediumRisks())) + " as medium, " + - "and " + strconv.Itoa(len(model.FilteredByOnlyLowRisks())) + " as low. " + - "

These risks are distributed across " + strconv.Itoa(len(model.InScopeTechnicalAssets())) + " in-scope technical assets. ") - text.WriteString("The following sub-chapters of this section describe each identified risk grouped by technical asset. ") // TODO more explanation text - text.WriteString("The RAA value of a technical asset is the calculated \"Relative Attacker Attractiveness\" value in percent.") - html.Write(5, text.String()) - text.Reset() - currentChapterTitleBreadcrumb = title - for _, technicalAsset := range model.SortedTechnicalAssetsByRiskSeverityAndTitle() { - risks := technicalAsset.GeneratedRisks() - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - if technicalAsset.OutOfScope { - pdfColorOutOfScope() - suffix = "out-of-scope" - } else { - switch model.HighestSeverityStillAtRisk(risks) { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - case model.HighSeverity: - colors.ColorHighRisk(pdf) - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - case model.LowSeverity: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - } - - // asset title - title := technicalAsset.Title + ": " + suffix - addHeadline(uni(title), true) - pdfColorBlack() - defineLinkTarget("{" + technicalAsset.Id + "}") - currentChapterTitleBreadcrumb = title - - // asset description - html := pdf.HTMLBasicNew() - var text strings.Builder - text.WriteString("Description

") - text.WriteString(uni(technicalAsset.Description)) - html.Write(5, text.String()) - text.Reset() - pdf.SetTextColor(0, 0, 0) - - // and more metadata of asset in tabular view - pdf.Ln(-1) - pdf.Ln(-1) - pdf.Ln(-1) - if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdfColorBlack() - pdf.CellFormat(190, 6, "Identified Risks of Asset", "0", 0, "", false, 0, "") - pdfColorGray() - oldLeft, _, _, _ := pdf.GetMargins() - if len(risks) > 0 { - pdf.SetFont("Helvetica", "", fontSizeSmall) - html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(15) - /* - pdf.Ln(-1) - pdf.Ln(-1) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(185, 6, strconv.Itoa(len(risks))+" risks in total were identified", "0", 0, "", false, 0, "") - */ - headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false - pdf.Ln(-1) - for _, risk := range risks { - text.WriteString("
") - html.Write(5, text.String()) - text.Reset() - if pdf.GetY() > 250 { // 250 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - switch risk.Severity { - case model.CriticalSeverity: - colors.ColorCriticalRisk(pdf) - if !headlineCriticalWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft + 3) - html.Write(5, "
Critical Risk Severity

") - headlineCriticalWritten = true - } - case model.HighSeverity: - colors.ColorHighRisk(pdf) - if !headlineHighWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft + 3) - html.Write(5, "
High Risk Severity

") - headlineHighWritten = true - } - case model.ElevatedSeverity: - colors.ColorElevatedRisk(pdf) - if !headlineElevatedWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft + 3) - html.Write(5, "
Elevated Risk Severity

") - headlineElevatedWritten = true - } - case model.MediumSeverity: - colors.ColorMediumRisk(pdf) - if !headlineMediumWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft + 3) - html.Write(5, "
Medium Risk Severity

") - headlineMediumWritten = true - } - case model.LowSeverity: - colors.ColorLowRisk(pdf) - if !headlineLowWritten { - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(oldLeft + 3) - html.Write(5, "
Low Risk Severity

") - headlineLowWritten = true - } - default: - pdfColorBlack() - } - if !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - pdfColorBlack() - } - posY := pdf.GetY() - pdf.SetLeftMargin(oldLeft + 10) - pdf.SetFont("Helvetica", "", fontSizeBody) - text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.") - text.WriteString("
") - html.Write(5, text.String()) - text.Reset() - - pdf.SetFont("Helvetica", "", fontSizeVerySmall) - pdfColorGray() - pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false) - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.Category.Id]) - pdf.SetFont("Helvetica", "", fontSizeBody) - writeRiskTrackingStatus(risk) - pdf.SetLeftMargin(oldLeft) - } - } else { - pdf.Ln(-1) - pdf.Ln(-1) - pdfColorGray() - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetLeftMargin(15) - text := "No risks were identified." - if technicalAsset.OutOfScope { - text = "Asset was defined as out-of-scope." - } - html.Write(5, text) - pdf.Ln(-1) - } - pdf.SetLeftMargin(oldLeft) - - pdf.Ln(-1) - pdf.Ln(4) - if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 6, "Asset Information", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Id, "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Type.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Usage.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "RAA:", "0", 0, "", false, 0, 
"") - pdfColorBlack() - textRAA := fmt.Sprintf("%.0f", technicalAsset.RAA) + " %" - if technicalAsset.OutOfScope { - pdfColorGray() - textRAA = "out-of-scope" - } - pdf.MultiCell(145, 6, textRAA, "0", "0", false) - pdfColorBlack() - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Size:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Size.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Technology:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Technology.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := technicalAsset.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) == 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Internet:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Internet), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Machine:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Machine.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", 
"0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Encryption:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, technicalAsset.Encryption.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Multi-Tenant:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.MultiTenant), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Redundant:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Redundant), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Custom-Developed:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.CustomDevelopedParts), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Client by Human:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.UsedAsClientByHuman), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Data Processed:", "0", 0, "", false, 0, "") - pdfColorBlack() - dataAssetsProcessedText := "" - for _, dataAsset := range technicalAsset.DataAssetsProcessedSorted() { - if len(dataAssetsProcessedText) > 0 { - dataAssetsProcessedText += ", " - } - dataAssetsProcessedText += dataAsset.Title - } - if len(dataAssetsProcessedText) == 0 { - pdfColorGray() - dataAssetsProcessedText = "none" - } - pdf.MultiCell(145, 6, uni(dataAssetsProcessedText), "0", "0", false) - - pdfColorGray() - 
pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Data Stored:", "0", 0, "", false, 0, "") - pdfColorBlack() - dataAssetsStoredText := "" - for _, dataAsset := range technicalAsset.DataAssetsStoredSorted() { - if len(dataAssetsStoredText) > 0 { - dataAssetsStoredText += ", " - } - dataAssetsStoredText += dataAsset.Title - } - if len(dataAssetsStoredText) == 0 { - pdfColorGray() - dataAssetsStoredText = "none" - } - pdf.MultiCell(145, 6, uni(dataAssetsStoredText), "0", "0", false) - - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Formats Accepted:", "0", 0, "", false, 0, "") - pdfColorBlack() - formatsAcceptedText := "" - for _, formatAccepted := range technicalAsset.DataFormatsAcceptedSorted() { - if len(formatsAcceptedText) > 0 { - formatsAcceptedText += ", " - } - formatsAcceptedText += formatAccepted.Title() - } - if len(formatsAcceptedText) == 0 { - pdfColorGray() - formatsAcceptedText = "none of the special data formats accepted" - } - pdf.MultiCell(145, 6, formatsAcceptedText, "0", "0", false) - - pdf.Ln(-1) - pdf.Ln(4) - if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 6, "Asset Rating", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, uni(technicalAsset.Owner), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, technicalAsset.Confidentiality.String(), "0", 0, "", false, 0, "") - pdfColorGray() 
- pdf.CellFormat(115, 6, technicalAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, technicalAsset.Integrity.String(), "0", 0, "", false, 0, "") - pdfColorGray() - pdf.CellFormat(115, 6, technicalAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, technicalAsset.Availability.String(), "0", 0, "", false, 0, "") - pdfColorGray() - pdf.CellFormat(115, 6, technicalAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, uni(technicalAsset.JustificationCiaRating), "0", "0", false) - - if technicalAsset.OutOfScope { - pdf.Ln(-1) - pdf.Ln(4) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 6, "Asset Out-of-Scope Justification", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.MultiCell(190, 6, uni(technicalAsset.JustificationOutOfScope), "0", "0", false) - pdf.Ln(-1) - } - pdf.Ln(-1) - - if len(technicalAsset.CommunicationLinks) > 0 { - pdf.Ln(-1) - if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - 
pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 6, "Outgoing Communication Links: "+strconv.Itoa(len(technicalAsset.CommunicationLinks)), "0", 0, "", false, 0, "") - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Target technical asset names are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.Ln(-1) - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - for _, outgoingCommLink := range technicalAsset.CommunicationLinksSorted() { - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(185, 6, uni(outgoingCommLink.Title)+" (outgoing)", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.MultiCell(185, 6, uni(outgoingCommLink.Description), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdf.Ln(-1) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Target:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(125, 6, uni(model.ParsedModelRoot.TechnicalAssets[outgoingCommLink.TargetId].Title), "0", "0", false) - pdf.Link(60, pdf.GetY()-5, 70, 5, tocLinkIdByAssetId[outgoingCommLink.TargetId]) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, outgoingCommLink.Protocol.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Protocol.IsEncrypted()), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - 
pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, outgoingCommLink.Authentication.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, outgoingCommLink.Authorization.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Readonly), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, outgoingCommLink.Usage.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := outgoingCommLink.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) == 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.VPN), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - 
pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.IpFiltered), "0", "0", false) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "") - pdfColorBlack() - dataAssetsSentText := "" - for _, dataAsset := range outgoingCommLink.DataAssetsSentSorted() { - if len(dataAssetsSentText) > 0 { - dataAssetsSentText += ", " - } - dataAssetsSentText += dataAsset.Title - } - if len(dataAssetsSentText) == 0 { - pdfColorGray() - dataAssetsSentText = "none" - } - pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "") - pdfColorBlack() - dataAssetsReceivedText := "" - for _, dataAsset := range outgoingCommLink.DataAssetsReceivedSorted() { - if len(dataAssetsReceivedText) > 0 { - dataAssetsReceivedText += ", " - } - dataAssetsReceivedText += dataAsset.Title - } - if len(dataAssetsReceivedText) == 0 { - pdfColorGray() - dataAssetsReceivedText = "none" - } - pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false) - pdf.Ln(-1) - } - } - - incomingCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - if len(incomingCommLinks) > 0 { - pdf.Ln(-1) - if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270 - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 6, "Incoming Communication Links: "+strconv.Itoa(len(incomingCommLinks)), "0", 0, "", false, 0, "") - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Source technical asset names are clickable and link to the corresponding chapter.") - 
pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.Ln(-1) - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - for _, incomingCommLink := range incomingCommLinks { - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorBlack() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(185, 6, uni(incomingCommLink.Title)+" (incoming)", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.MultiCell(185, 6, uni(incomingCommLink.Description), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdf.Ln(-1) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Source:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, uni(model.ParsedModelRoot.TechnicalAssets[incomingCommLink.SourceId].Title), "0", "0", false) - pdf.Link(60, pdf.GetY()-5, 70, 5, tocLinkIdByAssetId[incomingCommLink.SourceId]) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, incomingCommLink.Protocol.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Protocol.IsEncrypted()), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, incomingCommLink.Authentication.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - 
pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, incomingCommLink.Authorization.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Readonly), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, incomingCommLink.Usage.String(), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := incomingCommLink.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) == 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.VPN), "0", "0", false) - if pdf.GetY() > 270 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.IpFiltered), "0", "0", false) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, 
"") - pdfColorBlack() - dataAssetsSentText := "" - // yep, here we reverse the sent/received direction, as it's the incoming stuff - for _, dataAsset := range incomingCommLink.DataAssetsSentSorted() { - if len(dataAssetsSentText) > 0 { - dataAssetsSentText += ", " - } - dataAssetsSentText += dataAsset.Title - } - if len(dataAssetsSentText) == 0 { - pdfColorGray() - dataAssetsSentText = "none" - } - pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false) - pdfColorGray() - pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "") - pdfColorBlack() - dataAssetsReceivedText := "" - // yep, here we reverse the sent/received direction, as it's the incoming stuff - for _, dataAsset := range incomingCommLink.DataAssetsReceivedSorted() { - if len(dataAssetsReceivedText) > 0 { - dataAssetsReceivedText += ", " - } - dataAssetsReceivedText += dataAsset.Title - } - if len(dataAssetsReceivedText) == 0 { - pdfColorGray() - dataAssetsReceivedText = "none" - } - pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false) - pdf.Ln(-1) - } - } - } -} - -func createDataAssets() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - title := "Identified Data Breach Probabilities by Data Asset" - pdfColorBlack() - addHeadline(title, false) - defineLinkTarget("{intro-risks-by-data-asset}") - html := pdf.HTMLBasicNew() - html.Write(5, "In total "+strconv.Itoa(model.TotalRiskCount())+" potential risks have been identified during the threat modeling process "+ - "of which "+ - ""+strconv.Itoa(len(model.FilteredByOnlyCriticalRisks()))+" are rated as critical, "+ - ""+strconv.Itoa(len(model.FilteredByOnlyHighRisks()))+" as high, "+ - ""+strconv.Itoa(len(model.FilteredByOnlyElevatedRisks()))+" as elevated, "+ - ""+strconv.Itoa(len(model.FilteredByOnlyMediumRisks()))+" as medium, "+ - "and "+strconv.Itoa(len(model.FilteredByOnlyLowRisks()))+" as low. "+ - "

These risks are distributed across "+strconv.Itoa(len(model.ParsedModelRoot.DataAssets))+" data assets. ") - html.Write(5, "The following sub-chapters of this section describe the derived data breach probabilities grouped by data asset.
") // TODO more explanation text - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdfColorGray() - html.Write(5, "Technical asset names and risk IDs are clickable and link to the corresponding chapter.") - pdf.SetFont("Helvetica", "", fontSizeBody) - currentChapterTitleBreadcrumb = title - for _, dataAsset := range model.SortedDataAssetsByDataBreachProbabilityAndTitle() { - if pdf.GetY() > 280 { // 280 as only small font previously (not 250) - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - pdfColorBlack() - switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() { - case model.Probable: - colors.ColorHighRisk(pdf) - case model.Possible: - colors.ColorMediumRisk(pdf) - case model.Improbable: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if !dataAsset.IsDataBreachPotentialStillAtRisk() { - pdfColorBlack() - } - risks := dataAsset.IdentifiedDataBreachProbabilityRisks() - countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks)) - suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk" - if len(risks) != 1 { - suffix += "s" - } - title := uni(dataAsset.Title) + ": " + suffix - addHeadline(title, true) - defineLinkTarget("{data:" + dataAsset.Id + "}") - pdfColorBlack() - html.Write(5, uni(dataAsset.Description)) - html.Write(5, "

") - - pdf.SetFont("Helvetica", "", fontSizeBody) - /* - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Indirect Breach:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - probability := dataAsset.IdentifiedDataBreachProbability() - dataBreachText := probability.String() - switch probability { - case model.Probable: - colors.ColorHighRisk(pdf) - case model.Possible: - colors.ColorMediumRisk(pdf) - case model.Improbable: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if !dataAsset.IsDataBreachPotentialStillAtRisk() { - pdfColorBlack() - dataBreachText = "none" - } - pdf.MultiCell(145, 6, dataBreachText, "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - */ - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, dataAsset.Id, "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, dataAsset.Usage.String(), "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Quantity:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, dataAsset.Quantity.String(), "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := dataAsset.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) 
== 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Origin:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, uni(dataAsset.Origin), "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, uni(dataAsset.Owner), "0", "0", false) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, dataAsset.Confidentiality.String(), "0", 0, "", false, 0, "") - pdfColorGray() - pdf.CellFormat(115, 6, dataAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, dataAsset.Integrity.String(), "0", 0, "", false, 0, "") - pdfColorGray() - pdf.CellFormat(115, 6, dataAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.CellFormat(40, 6, dataAsset.Availability.String(), "0", 0, "", false, 0, "") - pdfColorGray() - pdf.CellFormat(115, 6, dataAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.Ln(-1) - if pdf.GetY() > 265 { - 
pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, uni(dataAsset.JustificationCiaRating), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Processed by:", "0", 0, "", false, 0, "") - pdfColorBlack() - processedByText := "" - for _, dataAsset := range dataAsset.ProcessedByTechnicalAssetsSorted() { - if len(processedByText) > 0 { - processedByText += ", " - } - processedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back - } - if len(processedByText) == 0 { - pdfColorGray() - processedByText = "none" - } - pdf.MultiCell(145, 6, uni(processedByText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Stored by:", "0", 0, "", false, 0, "") - pdfColorBlack() - storedByText := "" - for _, dataAsset := range dataAsset.StoredByTechnicalAssetsSorted() { - if len(storedByText) > 0 { - storedByText += ", " - } - storedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back - } - if len(storedByText) == 0 { - pdfColorGray() - storedByText = "none" - } - pdf.MultiCell(145, 6, uni(storedByText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Sent via:", "0", 0, "", false, 0, "") - pdfColorBlack() - sentViaText := "" - for _, commLink := range dataAsset.SentViaCommLinksSorted() { - if len(sentViaText) > 0 { - sentViaText += ", " - } - sentViaText += commLink.Title // TODO add link to technical asset detail chapter and back - } - if len(sentViaText) == 0 { - pdfColorGray() - sentViaText = "none" - 
} - pdf.MultiCell(145, 6, uni(sentViaText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Received via:", "0", 0, "", false, 0, "") - pdfColorBlack() - receivedViaText := "" - for _, commLink := range dataAsset.ReceivedViaCommLinksSorted() { - if len(receivedViaText) > 0 { - receivedViaText += ", " - } - receivedViaText += commLink.Title // TODO add link to technical asset detail chapter and back - } - if len(receivedViaText) == 0 { - pdfColorGray() - receivedViaText = "none" - } - pdf.MultiCell(145, 6, uni(receivedViaText), "0", "0", false) - - /* - // where is this data asset at risk (i.e. why) - risksByTechAssetId := dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId() - techAssetsResponsible := make([]model.TechnicalAsset, 0) - for techAssetId, _ := range risksByTechAssetId { - techAssetsResponsible = append(techAssetsResponsible, model.ParsedModelRoot.TechnicalAssets[techAssetId]) - } - sort.Sort(model.ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk(techAssetsResponsible)) - assetStr := "assets" - if len(techAssetsResponsible) == 1 { - assetStr = "asset" - } - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Risk via:", "0", 0, "", false, 0, "") - if len(techAssetsResponsible) == 0 { - pdfColorGray() - pdf.MultiCell(145, 6, "This data asset is not directly at risk via any technical asset.", "0", "0", false) - } else { - pdfColorBlack() - pdf.MultiCell(145, 6, "This data asset is at direct risk via "+strconv.Itoa(len(techAssetsResponsible))+" technical "+assetStr+":", "0", "0", false) - for _, techAssetResponsible := range techAssetsResponsible { - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - switch model.HighestSeverityStillAtRisk(techAssetResponsible.GeneratedRisks()) { - case model.High: - colors.ColorHighRisk(pdf) - 
case model.Medium: - colors.ColorMediumRisk(pdf) - case model.Low: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - risks := techAssetResponsible.GeneratedRisks() - if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 { - pdfColorBlack() - } - riskStr := "risks" - if len(risks) == 1 { - riskStr = "risk" - } - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - posY := pdf.GetY() - risksResponsible := techAssetResponsible.GeneratedRisks() - risksResponsibleStillAtRisk := model.ReduceToOnlyStillAtRisk(risksResponsible) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.MultiCell(185, 6, uni(techAssetResponsible.Title)+": "+strconv.Itoa(len(risksResponsibleStillAtRisk))+" / "+strconv.Itoa(len(risksResponsible))+" "+riskStr, "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[techAssetResponsible.Id]) - } - pdfColorBlack() - } - */ - - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Data Breach:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.SetFont("Helvetica", "B", fontSizeBody) - dataBreachProbability := dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() - riskText := dataBreachProbability.String() - switch dataBreachProbability { - case model.Probable: - colors.ColorHighRisk(pdf) - case model.Possible: - colors.ColorMediumRisk(pdf) - case model.Improbable: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if !dataAsset.IsDataBreachPotentialStillAtRisk() { - pdfColorBlack() - riskText = "none" - } - pdf.MultiCell(145, 6, riskText, "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - - // how can is this data asset be indirectly lost (i.e. 
why) - dataBreachRisksStillAtRisk := dataAsset.IdentifiedDataBreachProbabilityRisksStillAtRisk() - sort.Sort(model.ByDataBreachProbabilitySort(dataBreachRisksStillAtRisk)) - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Data Breach Risks:", "0", 0, "", false, 0, "") - if len(dataBreachRisksStillAtRisk) == 0 { - pdfColorGray() - pdf.MultiCell(145, 6, "This data asset has no data breach potential.", "0", "0", false) - } else { - pdfColorBlack() - riskRemainingStr := "risks" - if countStillAtRisk == 1 { - riskRemainingStr = "risk" - } - pdf.MultiCell(145, 6, "This data asset has data breach potential because of "+ - ""+strconv.Itoa(countStillAtRisk)+" remaining "+riskRemainingStr+":", "0", "0", false) - for _, dataBreachRisk := range dataBreachRisksStillAtRisk { - if pdf.GetY() > 280 { // 280 as only small font here - pageBreak() - pdf.SetY(36) - } - switch dataBreachRisk.DataBreachProbability { - case model.Probable: - colors.ColorHighRisk(pdf) - case model.Possible: - colors.ColorMediumRisk(pdf) - case model.Improbable: - colors.ColorLowRisk(pdf) - default: - pdfColorBlack() - } - if !dataBreachRisk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() { - pdfColorBlack() - } - pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") - posY := pdf.GetY() - pdf.SetFont("Helvetica", "", fontSizeVerySmall) - pdf.MultiCell(185, 5, dataBreachRisk.DataBreachProbability.Title()+": "+uni(dataBreachRisk.SyntheticId), "0", "0", false) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[dataBreachRisk.Category.Id]) - } - pdfColorBlack() - } - } -} - -func createTrustBoundaries() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - title := "Trust Boundaries" - pdfColorBlack() - addHeadline(title, false) - - html := pdf.HTMLBasicNew() - word := "has" - if len(model.ParsedModelRoot.TrustBoundaries) > 1 { - word = "have" - 
} - html.Write(5, "In total "+strconv.Itoa(len(model.ParsedModelRoot.TrustBoundaries))+" trust boundaries "+word+" been "+ - "modeled during the threat modeling process.") - currentChapterTitleBreadcrumb = title - for _, trustBoundary := range model.SortedTrustBoundariesByTitle() { - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - colors.ColorTwilight(pdf) - if !trustBoundary.Type.IsNetworkBoundary() { - pdfColorLightGray() - } - html.Write(5, ""+uni(trustBoundary.Title)+"
") - defineLinkTarget("{boundary:" + trustBoundary.Id + "}") - html.Write(5, uni(trustBoundary.Description)) - html.Write(5, "

") - - pdf.SetFont("Helvetica", "", fontSizeBody) - - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, trustBoundary.Id, "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "") - colors.ColorTwilight(pdf) - if !trustBoundary.Type.IsNetworkBoundary() { - pdfColorLightGray() - } - pdf.MultiCell(145, 6, trustBoundary.Type.String(), "0", "0", false) - pdfColorBlack() - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := trustBoundary.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) == 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Assets inside:", "0", 0, "", false, 0, "") - pdfColorBlack() - assetsInsideText := "" - for _, assetKey := range trustBoundary.TechnicalAssetsInside { - if len(assetsInsideText) > 0 { - assetsInsideText += ", " - } - assetsInsideText += model.ParsedModelRoot.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back - } - if len(assetsInsideText) == 0 { - pdfColorGray() - assetsInsideText = "none" - } - pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Boundaries nested:", "0", 
0, "", false, 0, "") - pdfColorBlack() - boundariesNestedText := "" - for _, assetKey := range trustBoundary.TrustBoundariesNested { - if len(boundariesNestedText) > 0 { - boundariesNestedText += ", " - } - boundariesNestedText += model.ParsedModelRoot.TrustBoundaries[assetKey].Title - } - if len(boundariesNestedText) == 0 { - pdfColorGray() - boundariesNestedText = "none" - } - pdf.MultiCell(145, 6, uni(boundariesNestedText), "0", "0", false) - } -} - -func createSharedRuntimes() { - uni := pdf.UnicodeTranslatorFromDescriptor("") - title := "Shared Runtimes" - pdfColorBlack() - addHeadline(title, false) - - html := pdf.HTMLBasicNew() - word, runtime := "has", "runtime" - if len(model.ParsedModelRoot.SharedRuntimes) > 1 { - word, runtime = "have", "runtimes" - } - html.Write(5, "In total "+strconv.Itoa(len(model.ParsedModelRoot.SharedRuntimes))+" shared "+runtime+" "+word+" been "+ - "modeled during the threat modeling process.") - currentChapterTitleBreadcrumb = title - for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() { - pdfColorBlack() - if pdf.GetY() > 250 { - pageBreak() - pdf.SetY(36) - } else { - html.Write(5, "


") - } - html.Write(5, ""+uni(sharedRuntime.Title)+"
") - defineLinkTarget("{runtime:" + sharedRuntime.Id + "}") - html.Write(5, uni(sharedRuntime.Description)) - html.Write(5, "

") - - pdf.SetFont("Helvetica", "", fontSizeBody) - - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(145, 6, sharedRuntime.Id, "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "") - pdfColorBlack() - tagsUsedText := "" - sorted := sharedRuntime.Tags - sort.Strings(sorted) - for _, tag := range sorted { - if len(tagsUsedText) > 0 { - tagsUsedText += ", " - } - tagsUsedText += tag - } - if len(tagsUsedText) == 0 { - pdfColorGray() - tagsUsedText = "none" - } - pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false) - - if pdf.GetY() > 265 { - pageBreak() - pdf.SetY(36) - } - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(40, 6, "Assets running:", "0", 0, "", false, 0, "") - pdfColorBlack() - assetsInsideText := "" - for _, assetKey := range sharedRuntime.TechnicalAssetsRunning { - if len(assetsInsideText) > 0 { - assetsInsideText += ", " - } - assetsInsideText += model.ParsedModelRoot.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back - } - if len(assetsInsideText) == 0 { - pdfColorGray() - assetsInsideText = "none" - } - pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false) - } -} - -func createRiskRulesChecked(modelFilename string, skipRiskRules string, buildTimestamp string, modelHash string, customRiskRules map[string]model.CustomRiskRule) { - pdf.SetTextColor(0, 0, 0) - title := "Risk Rules Checked by Threagile" - addHeadline(title, false) - defineLinkTarget("{risk-rules-checked}") - currentChapterTitleBreadcrumb = title - - html := pdf.HTMLBasicNew() - var strBuilder strings.Builder - pdfColorGray() - pdf.SetFont("Helvetica", "", fontSizeSmall) - timestamp := time.Now() - strBuilder.WriteString("Threagile Version: 
" + model.ThreagileVersion) - strBuilder.WriteString("
Threagile Build Timestamp: " + buildTimestamp) - strBuilder.WriteString("
Threagile Execution Timestamp: " + timestamp.Format("20060102150405")) - strBuilder.WriteString("
Model Filename: " + modelFilename) - strBuilder.WriteString("
Model Hash (SHA256): " + modelHash) - html.Write(5, strBuilder.String()) - strBuilder.Reset() - pdfColorBlack() - pdf.SetFont("Helvetica", "", fontSizeBody) - strBuilder.WriteString("

Threagile (see https://threagile.io for more details) is an open-source toolkit for agile threat modeling, created by Christian Schneider (https://christian-schneider.net): It allows to model an architecture with its assets in an agile fashion as a YAML file " + - "directly inside the IDE. Upon execution of the Threagile toolkit all standard risk rules (as well as individual custom rules if present) " + - "are checked against the architecture model. At the time the Threagile toolkit was executed on the model input file " + - "the following risk rules were checked:") - html.Write(5, strBuilder.String()) - strBuilder.Reset() - - // TODO use the new plugin system to discover risk rules instead of hard-coding them here: - skippedRules := strings.Split(skipRiskRules, ",") - skipped := "" - pdf.Ln(-1) - - for id, customRule := range customRiskRules { - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+customRule.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "I", fontSizeBody) - pdf.CellFormat(190, 6, "Custom Risk Rule", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, customRule.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(customRule.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", 
false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, customRule.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, customRule.Category().RiskAssessment, "0", "0", false) - } - - for _, key := range model.SortedKeysOfIndividualRiskCategories() { - indivRiskCat := model.ParsedModelRoot.IndividualRiskCategories[key] - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - pdf.CellFormat(190, 3, indivRiskCat.Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, indivRiskCat.Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "I", fontSizeBody) - pdf.CellFormat(190, 6, "Individual Risk Category", "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, indivRiskCat.STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(indivRiskCat.Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, indivRiskCat.DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, indivRiskCat.RiskAssessment, "0", "0", false) - } - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, accidental_secret_leak.Category().Id) { - skipped = "SKIPPED - " - 
} else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+accidental_secret_leak.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, accidental_secret_leak.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, accidental_secret_leak.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(accidental_secret_leak.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, accidental_secret_leak.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, accidental_secret_leak.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, code_backdooring.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+code_backdooring.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, code_backdooring.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
code_backdooring.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(code_backdooring.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, code_backdooring.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, code_backdooring.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, container_baseimage_backdooring.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+container_baseimage_backdooring.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, container_baseimage_backdooring.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, container_baseimage_backdooring.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(container_baseimage_backdooring.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
container_baseimage_backdooring.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, container_baseimage_backdooring.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, container_platform_escape.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+container_platform_escape.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, container_platform_escape.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, container_platform_escape.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(container_platform_escape.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, container_platform_escape.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, container_platform_escape.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, cross_site_request_forgery.Category().Id) { - skipped = "SKIPPED - " - } else { - 
skipped = "" - } - pdf.CellFormat(190, 3, skipped+cross_site_request_forgery.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, cross_site_request_forgery.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, cross_site_request_forgery.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(cross_site_request_forgery.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, cross_site_request_forgery.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, cross_site_request_forgery.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, cross_site_scripting.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+cross_site_scripting.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, cross_site_scripting.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
cross_site_scripting.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(cross_site_scripting.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, cross_site_scripting.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, cross_site_scripting.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, dos_risky_access_across_trust_boundary.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+dos_risky_access_across_trust_boundary.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, dos_risky_access_across_trust_boundary.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(dos_risky_access_across_trust_boundary.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", 
false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, incomplete_model.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+incomplete_model.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, incomplete_model.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, incomplete_model.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(incomplete_model.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, incomplete_model.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, incomplete_model.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, ldap_injection.Category().Id) { - skipped = "SKIPPED - " - } else { - 
skipped = "" - } - pdf.CellFormat(190, 3, skipped+ldap_injection.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, ldap_injection.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, ldap_injection.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(ldap_injection.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, ldap_injection.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, ldap_injection.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_authentication.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_authentication.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_authentication.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_authentication.Category().STRIDE.Title(), "0", "0", false) - 
pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_authentication.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_authentication.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_authentication.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_authentication_second_factor.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_authentication_second_factor.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_authentication_second_factor.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_authentication_second_factor.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_authentication_second_factor.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
missing_authentication_second_factor.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_authentication_second_factor.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_build_infrastructure.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_build_infrastructure.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_build_infrastructure.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_build_infrastructure.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_build_infrastructure.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_build_infrastructure.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_build_infrastructure.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_cloud_hardening.Category().Id) { - skipped 
= "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_cloud_hardening.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_cloud_hardening.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_cloud_hardening.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_cloud_hardening.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_cloud_hardening.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_cloud_hardening.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_file_validation.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_file_validation.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_file_validation.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - 
pdf.MultiCell(160, 6, missing_file_validation.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_file_validation.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_file_validation.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_file_validation.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_hardening.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_hardening.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_hardening.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_hardening.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_hardening.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
missing_hardening.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_hardening.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_identity_propagation.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_identity_propagation.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_identity_propagation.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_propagation.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_identity_propagation.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_propagation.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_propagation.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_identity_provider_isolation.Category().Id) { - skipped = "SKIPPED - " - } else { 
- skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_identity_provider_isolation.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_identity_provider_isolation.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_identity_provider_isolation.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_identity_store.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_identity_store.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_identity_store.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", 
false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_store.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_identity_store.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_store.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_identity_store.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_network_segmentation.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_network_segmentation.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_network_segmentation.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_network_segmentation.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_network_segmentation.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", 
"0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_network_segmentation.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_network_segmentation.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_vault.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_vault.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_vault.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_vault.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_vault_isolation.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - 
pdf.CellFormat(190, 3, skipped+missing_vault_isolation.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_vault_isolation.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault_isolation.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_vault_isolation.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault_isolation.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_vault_isolation.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, missing_waf.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+missing_waf.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, missing_waf.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_waf.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() 
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(missing_waf.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_waf.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, missing_waf.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, mixed_targets_on_shared_runtime.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+mixed_targets_on_shared_runtime.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, mixed_targets_on_shared_runtime.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(mixed_targets_on_shared_runtime.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().DetectionLogic, "0", "0", false) - pdfColorGray() 
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, path_traversal.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+path_traversal.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, path_traversal.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, path_traversal.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(path_traversal.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, path_traversal.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, path_traversal.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, push_instead_of_pull_deployment.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+push_instead_of_pull_deployment.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - 
pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, push_instead_of_pull_deployment.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(push_instead_of_pull_deployment.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, search_query_injection.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+search_query_injection.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, search_query_injection.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, search_query_injection.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", 
"0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(search_query_injection.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, search_query_injection.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, search_query_injection.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, server_side_request_forgery.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+server_side_request_forgery.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, server_side_request_forgery.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, server_side_request_forgery.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(server_side_request_forgery.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, server_side_request_forgery.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - 
pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, server_side_request_forgery.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, service_registry_poisoning.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+service_registry_poisoning.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, service_registry_poisoning.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, service_registry_poisoning.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(service_registry_poisoning.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, service_registry_poisoning.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, service_registry_poisoning.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, sql_nosql_injection.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+sql_nosql_injection.Category().Title, "0", 0, 
"", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, sql_nosql_injection.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, sql_nosql_injection.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(sql_nosql_injection.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, sql_nosql_injection.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, sql_nosql_injection.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unchecked_deployment.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unchecked_deployment.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unchecked_deployment.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unchecked_deployment.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - 
pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unchecked_deployment.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unchecked_deployment.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unchecked_deployment.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unencrypted_asset.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unencrypted_asset.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unencrypted_asset.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_asset.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unencrypted_asset.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_asset.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, 
"") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_asset.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unencrypted_communication.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unencrypted_communication.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unencrypted_communication.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_communication.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unencrypted_communication.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_communication.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unencrypted_communication.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unguarded_access_from_internet.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unguarded_access_from_internet.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, 
unguarded_access_from_internet.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unguarded_access_from_internet.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unguarded_direct_datastore_access.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unguarded_direct_datastore_access.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unguarded_direct_datastore_access.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - 
pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unguarded_direct_datastore_access.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unnecessary_communication_link.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unnecessary_communication_link.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unnecessary_communication_link.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_communication_link.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unnecessary_communication_link.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_communication_link.Category().DetectionLogic, "0", "0", false) - 
pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_communication_link.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unnecessary_data_asset.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unnecessary_data_asset.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unnecessary_data_asset.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_asset.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unnecessary_data_asset.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_asset.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_asset.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unnecessary_data_transfer.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unnecessary_data_transfer.Category().Title, "0", 
0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unnecessary_data_transfer.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unnecessary_data_transfer.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, unnecessary_technical_asset.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+unnecessary_technical_asset.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, unnecessary_technical_asset.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - 
pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(unnecessary_technical_asset.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, untrusted_deserialization.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+untrusted_deserialization.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, untrusted_deserialization.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, untrusted_deserialization.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(untrusted_deserialization.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, untrusted_deserialization.Category().DetectionLogic, "0", "0", false) - 
pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, untrusted_deserialization.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, wrong_communication_link_content.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+wrong_communication_link_content.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, wrong_communication_link_content.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_communication_link_content.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(wrong_communication_link_content.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_communication_link_content.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_communication_link_content.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, wrong_trust_boundary_content.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - 
pdf.CellFormat(190, 3, skipped+wrong_trust_boundary_content.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, wrong_trust_boundary_content.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(wrong_trust_boundary_content.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().RiskAssessment, "0", "0", false) - - pdf.Ln(-1) - pdf.SetFont("Helvetica", "B", fontSizeBody) - if model.Contains(skippedRules, xml_external_entity.Category().Id) { - skipped = "SKIPPED - " - } else { - skipped = "" - } - pdf.CellFormat(190, 3, skipped+xml_external_entity.Category().Title, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeSmall) - pdf.CellFormat(190, 6, xml_external_entity.Category().Id, "0", 0, "", false, 0, "") - pdf.Ln(-1) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, 
xml_external_entity.Category().STRIDE.Title(), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, firstParagraph(xml_external_entity.Category().Description), "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, xml_external_entity.Category().DetectionLogic, "0", "0", false) - pdfColorGray() - pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "") - pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "") - pdfColorBlack() - pdf.MultiCell(160, 6, xml_external_entity.Category().RiskAssessment, "0", "0", false) -} - -func createTargetDescription(baseFolder string) { - uni := pdf.UnicodeTranslatorFromDescriptor("") - pdf.SetTextColor(0, 0, 0) - title := "Application Overview" - addHeadline(title, false) - defineLinkTarget("{target-overview}") - currentChapterTitleBreadcrumb = title - - var intro strings.Builder - html := pdf.HTMLBasicNew() - - intro.WriteString("Business Criticality

") - intro.WriteString("The overall business criticality of \"" + uni(model.ParsedModelRoot.Title) + "\" was rated as:

") - html.Write(5, intro.String()) - criticality := model.ParsedModelRoot.BusinessCriticality - intro.Reset() - pdfColorGray() - intro.WriteString("( ") - if criticality == model.Archive { - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - intro.WriteString("" + strings.ToUpper(model.Archive.String()) + "") - html.Write(5, intro.String()) - intro.Reset() - pdfColorGray() - } else { - intro.WriteString(model.Archive.String()) - } - intro.WriteString(" | ") - if criticality == model.Operational { - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - intro.WriteString("" + strings.ToUpper(model.Operational.String()) + "") - html.Write(5, intro.String()) - intro.Reset() - pdfColorGray() - } else { - intro.WriteString(model.Operational.String()) - } - intro.WriteString(" | ") - if criticality == model.Important { - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - intro.WriteString("" + strings.ToUpper(model.Important.String()) + "") - html.Write(5, intro.String()) - intro.Reset() - pdfColorGray() - } else { - intro.WriteString(model.Important.String()) - } - intro.WriteString(" | ") - if criticality == model.Critical { - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - intro.WriteString("" + strings.ToUpper(model.Critical.String()) + "") - html.Write(5, intro.String()) - intro.Reset() - pdfColorGray() - } else { - intro.WriteString(model.Critical.String()) - } - intro.WriteString(" | ") - if criticality == model.MissionCritical { - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - intro.WriteString("" + strings.ToUpper(model.MissionCritical.String()) + "") - html.Write(5, intro.String()) - intro.Reset() - pdfColorGray() - } else { - intro.WriteString(model.MissionCritical.String()) - } - intro.WriteString(" )") - html.Write(5, intro.String()) - intro.Reset() - pdfColorBlack() - - intro.WriteString("


Business Overview

") - intro.WriteString(uni(model.ParsedModelRoot.BusinessOverview.Description)) - html.Write(5, intro.String()) - intro.Reset() - addCustomImages(model.ParsedModelRoot.BusinessOverview.Images, baseFolder, html) - - intro.WriteString("


Technical Overview

") - intro.WriteString(uni(model.ParsedModelRoot.TechnicalOverview.Description)) - html.Write(5, intro.String()) - intro.Reset() - addCustomImages(model.ParsedModelRoot.TechnicalOverview.Images, baseFolder, html) -} - -func addCustomImages(customImages []map[string]string, baseFolder string, html gofpdf.HTMLBasicType) { - var text strings.Builder - for _, customImage := range customImages { - for imageFilename := range customImage { - imageFilenameWithoutPath := filepath.Base(imageFilename) - // check JPEG, PNG or GIF - extension := strings.ToLower(filepath.Ext(imageFilenameWithoutPath)) - if extension == ".jpeg" || extension == ".jpg" || extension == ".png" || extension == ".gif" { - imageFullFilename := baseFolder + "/" + imageFilenameWithoutPath - if pdf.GetY()+getHeightWhenWidthIsFix(imageFullFilename, 180) > 250 { - pageBreak() - pdf.SetY(36) - } else { - text.WriteString("

") - } - text.WriteString(customImage[imageFilename] + ":

") - html.Write(5, text.String()) - text.Reset() - - var options gofpdf.ImageOptions - options.ImageType = "" - pdf.RegisterImage(imageFullFilename, "") - pdf.ImageOptions(imageFullFilename, 15, pdf.GetY()+50, 170, 0, true, options, 0, "") - } else { - log.Print("Ignoring custom image file: ", imageFilenameWithoutPath) - } - } - } -} - -// fileExists checks if a file exists and is not a directory before we -// try using it to prevent further errors. -func fileExists(filename string) bool { - info, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - return !info.IsDir() -} - -func getHeightWhenWidthIsFix(imageFullFilename string, width float64) float64 { - if !fileExists(imageFullFilename) { - panic(errors.New("Image file does not exist (or is not readable as file): " + filepath.Base(imageFullFilename))) - } - /* #nosec imageFullFilename is not tainted (see caller restricting it to image files of model folder only) */ - file, err := os.Open(imageFullFilename) - defer file.Close() - checkErr(err) - image, _, err := image.DecodeConfig(file) - checkErr(err) - return float64(image.Height) / (float64(image.Width) / width) -} - -func embedDataFlowDiagram(diagramFilenamePNG string) { - pdf.SetTextColor(0, 0, 0) - title := "Data-Flow Diagram" - addHeadline(title, false) - defineLinkTarget("{data-flow-diagram}") - currentChapterTitleBreadcrumb = title - - var intro strings.Builder - intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " + - "overview of the data-flow between technical assets. " + - "The RAA value is the calculated Relative Attacker Attractiveness in percent. 
" + - "For a full high-resolution version of this diagram please refer to the PNG image file alongside this report.") - - html := pdf.HTMLBasicNew() - html.Write(5, intro.String()) - - // check to rotate the image if it is wider than high - /* #nosec diagramFilenamePNG is not tainted */ - imagePath, _ := os.Open(diagramFilenamePNG) - defer imagePath.Close() - srcImage, _, _ := image.Decode(imagePath) - srcDimensions := srcImage.Bounds() - // wider than high? - muchWiderThanHigh := srcDimensions.Dx() > int(float64(srcDimensions.Dy())*1.25) - // fresh page (eventually landscape)? - isLandscapePage = false - /* - pinnedWidth, pinnedHeight := 190.0, 210.0 - if dataFlowDiagramFullscreen { - pinnedHeight = 235.0 - if muchWiderThanHigh { - if allowedPdfLandscapePages { - pinnedWidth = 275.0 - isLandscapePage = true - pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4")) - } else { - // so rotate the image left by 90 degrees - // ok, use temp PNG then - // now rotate left by 90 degrees - rotatedFile, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png") - checkErr(err) - defer os.Remove(rotatedFile.Name()) - dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx())) - err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0}) - checkErr(err) - newImage, _ := os.Create(rotatedFile.Name()) - defer newImage.Close() - err = png.Encode(newImage, dstImage) - checkErr(err) - diagramFilenamePNG = rotatedFile.Name() - } - } else { - pdf.AddPage() - } - } else { - pdf.Ln(10) - }*/ - // embed in PDF - var options gofpdf.ImageOptions - options.ImageType = "" - pdf.RegisterImage(diagramFilenamePNG, "") - var maxWidth, maxHeight, newWidth int - var embedWidth, embedHeight float64 - if allowedPdfLandscapePages && muchWiderThanHigh { - maxWidth, maxHeight = 275, 150 - isLandscapePage = true - pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4")) - } else { - pdf.Ln(10) - maxWidth, maxHeight = 190, 200 // reduced height as a text paragraph is 
above - } - newWidth = srcDimensions.Dx() / (srcDimensions.Dy() / maxHeight) - if newWidth <= maxWidth { - embedWidth, embedHeight = 0, float64(maxHeight) - } else { - embedWidth, embedHeight = float64(maxWidth), 0 - } - pdf.ImageOptions(diagramFilenamePNG, 10, pdf.GetY(), embedWidth, embedHeight, true, options, 0, "") - isLandscapePage = false - - // add diagram legend page - if embedDiagramLegendPage { - pdf.AddPage() - gofpdi.UseImportedTemplate(pdf, diagramLegendTemplateId, 0, 0, 0, 300) - } -} - -func embedDataRiskMapping(diagramFilenamePNG string) { - pdf.SetTextColor(0, 0, 0) - title := "Data Mapping" - addHeadline(title, false) - defineLinkTarget("{data-risk-mapping}") - currentChapterTitleBreadcrumb = title - - var intro strings.Builder - intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " + - "distribution of data assets across technical assets. The color matches the identified data breach probability and risk level " + - "(see the \"Data Breach Probabilities\" chapter for more details). " + - "A solid line stands for data is stored by the asset and a dashed one means " + - "data is processed by the asset. For a full high-resolution version of this diagram please refer to the PNG image " + - "file alongside this report.") - - html := pdf.HTMLBasicNew() - html.Write(5, intro.String()) - - // TODO dedupe with code from other diagram embedding (almost same code) - // check to rotate the image if it is wider than high - /* #nosec diagramFilenamePNG is not tainted */ - imagePath, _ := os.Open(diagramFilenamePNG) - defer imagePath.Close() - srcImage, _, _ := image.Decode(imagePath) - srcDimensions := srcImage.Bounds() - // wider than high? - widerThanHigh := srcDimensions.Dx() > srcDimensions.Dy() - pinnedWidth, pinnedHeight := 190.0, 195.0 - // fresh page (eventually landscape)? 
- isLandscapePage = false - /* - if dataFlowDiagramFullscreen { - pinnedHeight = 235.0 - if widerThanHigh { - if allowedPdfLandscapePages { - pinnedWidth = 275.0 - isLandscapePage = true - pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4")) - } else { - // so rotate the image left by 90 degrees - // ok, use temp PNG then - // now rotate left by 90 degrees - rotatedFile, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png") - checkErr(err) - defer os.Remove(rotatedFile.Name()) - dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx())) - err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0}) - checkErr(err) - newImage, _ := os.Create(rotatedFile.Name()) - defer newImage.Close() - err = png.Encode(newImage, dstImage) - checkErr(err) - diagramFilenamePNG = rotatedFile.Name() - } - } else { - pdf.AddPage() - } - } else { - pdf.Ln(10) - } - */ - // embed in PDF - pdf.Ln(10) - var options gofpdf.ImageOptions - options.ImageType = "" - pdf.RegisterImage(diagramFilenamePNG, "") - if widerThanHigh { - pinnedHeight = 0 - } else { - pinnedWidth = 0 - } - pdf.ImageOptions(diagramFilenamePNG, 10, pdf.GetY(), pinnedWidth, pinnedHeight, true, options, 0, "") - isLandscapePage = false -} - -func writeReportToFile(reportFilename string) { - err := pdf.OutputFileAndClose(reportFilename) - checkErr(err) -} - -func addHeadline(headline string, small bool) { - pdf.AddPage() - gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300) - fontSize := fontSizeHeadline - if small { - fontSize = fontSizeHeadlineSmall - } - pdf.SetFont("Helvetica", "B", float64(fontSize)) - pdf.Text(11, 40, headline) - pdf.SetFont("Helvetica", "", fontSizeBody) - pdf.SetX(17) - pdf.SetY(46) -} - -func pageBreak() { - pdf.SetDrawColor(0, 0, 0) - pdf.SetDashPattern([]float64{}, 0) - pdf.AddPage() - gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300) - pdf.SetX(17) - pdf.SetY(20) -} -func pageBreakInLists() { - pageBreak() - 
pdf.SetLineWidth(0.25) - pdf.SetDrawColor(160, 160, 160) - pdf.SetDashPattern([]float64{0.5, 0.5}, 0) -} - -func pdfColorDataAssets() { - pdf.SetTextColor(18, 36, 111) -} -func rgbHexColorDataAssets() string { - return "#12246F" -} - -func pdfColorTechnicalAssets() { - pdf.SetTextColor(18, 36, 111) -} -func rgbHexColorTechnicalAssets() string { - return "#12246F" -} - -func pdfColorTrustBoundaries() { - pdf.SetTextColor(18, 36, 111) -} -func rgbHexColorTrustBoundaries() string { - return "#12246F" -} - -func pdfColorSharedRuntime() { - pdf.SetTextColor(18, 36, 111) -} -func rgbHexColorSharedRuntime() string { - return "#12246F" -} - -func pdfColorRiskFindings() { - pdf.SetTextColor(160, 40, 30) -} -func rgbHexColorRiskFindings() string { - return "#A0281E" -} - -func pdfColorDisclaimer() { - pdf.SetTextColor(140, 140, 140) -} -func rgbHexColorDisclaimer() string { - return "#8C8C8C" -} - -func pdfColorOutOfScope() { - pdf.SetTextColor(127, 127, 127) -} -func rgbHexColorOutOfScope() string { - return "#7F7F7F" -} - -func pdfColorGray() { - pdf.SetTextColor(80, 80, 80) -} -func rgbHexColorGray() string { - return "#505050" -} - -func pdfColorLightGray() { - pdf.SetTextColor(100, 100, 100) -} -func rgbHexColorLightGray() string { - return "#646464" -} - -func pdfColorBlack() { - pdf.SetTextColor(0, 0, 0) -} -func rgbHexColorBlack() string { - return "#000000" -} - -func pdfColorRed() { - pdf.SetTextColor(255, 0, 0) -} -func rgbHexColorRed() string { - return "#FF0000" -} diff --git a/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go b/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go deleted file mode 100644 index 54627414..00000000 --- a/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go +++ /dev/null @@ -1,98 +0,0 @@ -package dos_risky_access_across_trust_boundary - -import ( - "github.com/threagile/threagile/model" 
-) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "dos-risky-access-across-trust-boundary", - Title: "DoS-risky Access Across Trust-Boundary", - Description: "Assets accessed across trust boundaries with critical or mission-critical availability rating " + - "are more prone to Denial-of-Service (DoS) risks.", - Impact: "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.", - ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html", - Action: "Anti-DoS Measures", - Mitigation: "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. " + - "Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. " + - "Generally applying redundancy on the targeted technical asset reduces the risk of DoS.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.DenialOfService, - DetectionLogic: "In-scope technical assets (excluding " + model.LoadBalancer.String() + ") with " + - "availability rating of " + model.Critical.String() + " or higher which have incoming data-flows across a " + - "network trust-boundary (excluding " + model.DevOps.String() + " usage).", - RiskAssessment: "Matching technical assets with availability rating " + - "of " + model.Critical.String() + " or higher are " + - "at " + model.LowSeverity.String() + " risk. When the availability rating is " + - model.MissionCritical.String() + " and neither a VPN nor IP filter for the incoming data-flow nor redundancy " + - "for the asset is applied, the risk-rating is considered " + model.MediumSeverity.String() + ".", // TODO reduce also, when data-flow authenticated and encrypted? 
- FalsePositives: "When the accessed target operations are not time- or resource-consuming.", - ModelFailurePossibleReason: false, - CWE: 400, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && technicalAsset.Technology != model.LoadBalancer && - technicalAsset.Availability >= model.Critical { - for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { - sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId] - if sourceAsset.Technology.IsTrafficForwarding() { - // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human - callersCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[sourceAsset.Id] - for _, callersCommLink := range callersCommLinks { - risks = checkRisk(technicalAsset, callersCommLink, sourceAsset.Title, risks) - } - } else { - risks = checkRisk(technicalAsset, incomingAccess, "", risks) - } - } - } - } - return risks -} - -func checkRisk(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink, hopBetween string, risks []model.Risk) []model.Risk { - if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() && - !incomingAccess.Protocol.IsProcessLocal() && incomingAccess.Usage != model.DevOps { - highRisk := technicalAsset.Availability == model.MissionCritical && - !incomingAccess.VPN && !incomingAccess.IpFiltered && !technicalAsset.Redundant - risks = append(risks, createRisk(technicalAsset, incomingAccess, hopBetween, - model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk)) - } - return risks -} - -func createRisk(techAsset model.TechnicalAsset, dataFlow model.CommunicationLink, hopBetween string, - clientOutsideTrustBoundary model.TechnicalAsset, moreRisky bool) 
model.Risk { - impact := model.LowImpact - if moreRisky { - impact = model.MediumImpact - } - if len(hopBetween) > 0 { - hopBetween = " forwarded via " + hopBetween + "" - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: "Denial-of-Service risky access of " + techAsset.Title + " by " + clientOutsideTrustBoundary.Title + - " via " + dataFlow.Title + "" + hopBetween, - MostRelevantTechnicalAssetId: techAsset.Id, - MostRelevantCommunicationLinkId: dataFlow.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{}, - } - risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataFlow.Id - return risk -} diff --git a/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go b/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go deleted file mode 100644 index e491655d..00000000 --- a/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go +++ /dev/null @@ -1,84 +0,0 @@ -package missing_authentication_second_factor - -import ( - "github.com/threagile/threagile/model" - "github.com/threagile/threagile/risks/built-in/missing-authentication" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-authentication-second-factor", - Title: "Missing Two-Factor Authentication (2FA)", - Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests with " + - "two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.", - Impact: "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.", - 
ASVS: "V2 - Authentication Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html", - Action: "Authentication with Second Factor (2FA)", - Mitigation: "Apply an authentication method to the technical asset protecting highly sensitive data via " + - "two-factor authentication for human users.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.BusinessSide, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope technical assets (except " + model.LoadBalancer.String() + ", " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", " + model.IDS.String() + ", and " + model.IPS.String() + ") should authenticate incoming requests via two-factor authentication (2FA) " + - "when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.", - RiskAssessment: model.MediumSeverity.String(), - FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + - "can be considered as false positives after individual review.", - ModelFailurePossibleReason: false, - CWE: 308, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.OutOfScope || - technicalAsset.Technology.IsTrafficForwarding() || - technicalAsset.Technology.IsUnprotectedCommsTolerated() { - continue - } - if technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical || - technicalAsset.HighestAvailability() >= model.Critical || - technicalAsset.MultiTenant { - // check each incoming data flow - commLinks := 
model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - for _, commLink := range commLinks { - caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId] - if caller.Technology.IsUnprotectedCommsTolerated() || caller.Type == model.Datastore { - continue - } - if caller.UsedAsClientByHuman { - moreRisky := commLink.HighestConfidentiality() >= model.Confidential || - commLink.HighestIntegrity() >= model.Critical - if moreRisky && commLink.Authentication != model.TwoFactor { - risks = append(risks, missing_authentication.CreateRisk(technicalAsset, commLink, commLink, "", model.MediumImpact, model.Unlikely, true, Category())) - } - } else if caller.Technology.IsTrafficForwarding() { - // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human - callersCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[caller.Id] - for _, callersCommLink := range callersCommLinks { - callersCaller := model.ParsedModelRoot.TechnicalAssets[callersCommLink.SourceId] - if callersCaller.Technology.IsUnprotectedCommsTolerated() || callersCaller.Type == model.Datastore { - continue - } - if callersCaller.UsedAsClientByHuman { - moreRisky := callersCommLink.HighestConfidentiality() >= model.Confidential || - callersCommLink.HighestIntegrity() >= model.Critical - if moreRisky && callersCommLink.Authentication != model.TwoFactor { - risks = append(risks, missing_authentication.CreateRisk(technicalAsset, commLink, callersCommLink, caller.Title, model.MediumImpact, model.Unlikely, true, Category())) - } - } - } - } - } - } - } - return risks -} diff --git a/risks/built-in/missing-authentication/missing-authentication-rule.go b/risks/built-in/missing-authentication/missing-authentication-rule.go deleted file mode 100644 index 9d002242..00000000 --- a/risks/built-in/missing-authentication/missing-authentication-rule.go +++ /dev/null @@ -1,98 +0,0 @@ -package missing_authentication - -import ( - 
"github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-authentication", - Title: "Missing Authentication", - Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes or stores sensitive data. ", - Impact: "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.", - ASVS: "V2 - Authentication Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", - Action: "Authentication of Incoming Requests", - Mitigation: "Apply an authentication method to the technical asset. To protect highly sensitive data consider " + - "the use of two-factor authentication for human users.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope technical assets (except " + model.LoadBalancer.String() + ", " + model.ReverseProxy.String() + ", " + model.ServiceRegistry.String() + ", " + model.WAF.String() + ", " + model.IDS.String() + ", and " + model.IPS.String() + " and in-process calls) should authenticate incoming requests when the asset processes or stores " + - "sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).", - RiskAssessment: "The risk rating (medium or high) " + - "depends on the sensitivity of the data sent across the communication link. 
Monitoring callers are exempted from this risk.", - FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + - "can be considered as false positives after individual review.", - ModelFailurePossibleReason: false, - CWE: 306, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.OutOfScope || technicalAsset.Technology == model.LoadBalancer || - technicalAsset.Technology == model.ReverseProxy || technicalAsset.Technology == model.ServiceRegistry || technicalAsset.Technology == model.WAF || technicalAsset.Technology == model.IDS || technicalAsset.Technology == model.IPS { - continue - } - if technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical || - technicalAsset.HighestAvailability() >= model.Critical || - technicalAsset.MultiTenant { - // check each incoming data flow - commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - for _, commLink := range commLinks { - caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId] - if caller.Technology.IsUnprotectedCommsTolerated() || caller.Type == model.Datastore { - continue - } - highRisk := commLink.HighestConfidentiality() == model.StrictlyConfidential || - commLink.HighestIntegrity() == model.MissionCritical - lowRisk := commLink.HighestConfidentiality() <= model.Internal && - commLink.HighestIntegrity() == model.Operational - impact := model.MediumImpact - if highRisk { - impact = model.HighImpact - } else if lowRisk { - impact = model.LowImpact - } - if commLink.Authentication == model.NoneAuthentication && !commLink.Protocol.IsProcessLocal() { - risks = append(risks, CreateRisk(technicalAsset, commLink, commLink, "", 
impact, model.Likely, false, Category())) - } - } - } - } - return risks -} - -func CreateRisk(technicalAsset model.TechnicalAsset, incomingAccess, incomingAccessOrigin model.CommunicationLink, hopBetween string, - impact model.RiskExploitationImpact, likelihood model.RiskExploitationLikelihood, twoFactor bool, category model.RiskCategory) model.Risk { - factorString := "" - if twoFactor { - factorString = "Two-Factor " - } - if len(hopBetween) > 0 { - hopBetween = "forwarded via " + hopBetween + " " - } - risk := model.Risk{ - Category: category, - Severity: model.CalculateSeverity(likelihood, impact), - ExploitationLikelihood: likelihood, - ExploitationImpact: impact, - Title: "Missing " + factorString + "Authentication covering communication link " + incomingAccess.Title + " " + - "from " + model.ParsedModelRoot.TechnicalAssets[incomingAccessOrigin.SourceId].Title + " " + hopBetween + - "to " + technicalAsset.Title + "", - MostRelevantTechnicalAssetId: technicalAsset.Id, - MostRelevantCommunicationLinkId: incomingAccess.Id, - DataBreachProbability: model.Possible, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + incomingAccess.Id + "@" + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/missing-hardening/missing-hardening-rule.go b/risks/built-in/missing-hardening/missing-hardening-rule.go deleted file mode 100644 index b4795740..00000000 --- a/risks/built-in/missing-hardening/missing-hardening-rule.go +++ /dev/null @@ -1,71 +0,0 @@ -package missing_hardening - -import ( - "github.com/threagile/threagile/model" - "strconv" -) - -const raaLimit = 55 -const raaLimitReduced = 40 - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-hardening", - Title: "Missing Hardening", - Description: "Technical assets with a Relative Attacker Attractiveness (RAA) value of " + strconv.Itoa(raaLimit) + " 
% or higher should be " + - "explicitly hardened taking best practices and vendor hardening guides into account.", - Impact: "If this risk remains unmitigated, attackers might be able to easier attack high-value targets.", - ASVS: "V14 - Configuration Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", - Action: "System Hardening", - Mitigation: "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor " + - "recommendations, DevSec Hardening Framework, DBSAT for Oracle databases, and others).", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.Tampering, - DetectionLogic: "In-scope technical assets with RAA values of " + strconv.Itoa(raaLimit) + " % or higher. " + - "Generally for high-value targets like datastores, application servers, identity providers and ERP systems this limit is reduced to " + strconv.Itoa(raaLimitReduced) + " %", - RiskAssessment: "The risk rating depends on the sensitivity of the data processed or stored in the technical asset.", - FalsePositives: "Usually no false positives.", - ModelFailurePossibleReason: false, - CWE: 16, - } -} - -func SupportedTags() []string { - return []string{"tomcat"} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope { - if technicalAsset.RAA >= raaLimit || (technicalAsset.RAA >= raaLimitReduced && - (technicalAsset.Type == model.Datastore || technicalAsset.Technology == model.ApplicationServer || technicalAsset.Technology == model.IdentityProvider || technicalAsset.Technology == model.ERP)) { - risks = append(risks, createRisk(technicalAsset)) - } - } - } - return risks -} - -func createRisk(technicalAsset model.TechnicalAsset) 
model.Risk { - title := "Missing Hardening risk at " + technicalAsset.Title + "" - impact := model.LowImpact - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical { - impact = model.MediumImpact - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Likely, impact), - ExploitationLikelihood: model.Likely, - ExploitationImpact: impact, - Title: title, - MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go b/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go deleted file mode 100644 index 31f10700..00000000 --- a/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go +++ /dev/null @@ -1,100 +0,0 @@ -package missing_identity_propagation - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-identity-propagation", - Title: "Missing Identity Propagation", - Description: "Technical assets (especially multi-tenant systems), which usually process data for endusers should " + - "authorize every request based on the identity of the enduser when the data flow is authenticated (i.e. non-public). 
" + - "For DevOps usages at least a technical-user authorization is required.", - Impact: "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within " + - "the system due to missing resource-based authorization checks.", - ASVS: "V4 - Access Control Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", - Action: "Identity Propagation and Resource-based Authorization", - Mitigation: "When processing requests for endusers if possible authorize in the backend against the propagated " + - "identity of the enduser. This can be achieved in passing JWTs or similar tokens and checking them in the backend " + - "services. For DevOps usages apply at least a technical-user authorization.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope service-like technical assets which usually process data based on enduser requests, if authenticated " + - "(i.e. non-public), should authorize incoming requests based on the propagated enduser identity when their rating is sensitive. " + - "This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). 
" + - "DevOps usages are exempted from this risk.", - RiskAssessment: "The risk rating (medium or high) " + - "depends on the confidentiality, integrity, and availability rating of the technical asset.", - FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " + - "can be considered as false positives after individual review.", - ModelFailurePossibleReason: false, - CWE: 284, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if technicalAsset.OutOfScope { - continue - } - if technicalAsset.Technology.IsUsuallyProcessingEnduserRequests() && - (technicalAsset.Confidentiality >= model.Confidential || - technicalAsset.Integrity >= model.Critical || - technicalAsset.Availability >= model.Critical || - (technicalAsset.MultiTenant && - (technicalAsset.Confidentiality >= model.Restricted || - technicalAsset.Integrity >= model.Important || - technicalAsset.Availability >= model.Important))) { - // check each incoming authenticated data flow - commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - for _, commLink := range commLinks { - caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId] - if !caller.Technology.IsUsuallyAbleToPropagateIdentityToOutgoingTargets() || caller.Type == model.Datastore { - continue - } - if commLink.Authentication != model.NoneAuthentication && - commLink.Authorization != model.EnduserIdentityPropagation { - if commLink.Usage == model.DevOps && commLink.Authorization != model.NoneAuthorization { - continue - } - highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical || - technicalAsset.Availability == model.MissionCritical - risks = append(risks, 
createRisk(technicalAsset, commLink, highRisk)) - } - } - } - } - return risks -} - -func createRisk(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink, moreRisky bool) model.Risk { - impact := model.LowImpact - if moreRisky { - impact = model.MediumImpact - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: "Missing Enduser Identity Propagation over communication link " + incomingAccess.Title + " " + - "from " + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Title + " " + - "to " + technicalAsset.Title + "", - MostRelevantTechnicalAssetId: technicalAsset.Id, - MostRelevantCommunicationLinkId: incomingAccess.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + incomingAccess.Id + "@" + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/missing-identity-store/missing-identity-store-rule.go b/risks/built-in/missing-identity-store/missing-identity-store-rule.go deleted file mode 100644 index 9096e320..00000000 --- a/risks/built-in/missing-identity-store/missing-identity-store-rule.go +++ /dev/null @@ -1,95 +0,0 @@ -package missing_identity_store - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-identity-store", - Title: "Missing Identity Store", - Description: "The modeled architecture does not contain an identity store, which might be the risk of a model missing " + - "critical assets (and thus not seeing their risks).", - Impact: "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store " + - "that is currently missing in the model.", - ASVS: 
"V2 - Authentication Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", - Action: "Identity Store", - Mitigation: "Include an identity store in the model if the application has a login.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Spoofing, - DetectionLogic: "Models with authenticated data-flows authorized via enduser-identity missing an in-scope identity store.", - RiskAssessment: "The risk rating depends on the sensitivity of the enduser-identity authorized technical assets and " + - "their data assets processed and stored.", - FalsePositives: "Models only offering data/services without any real authentication need " + - "can be considered as false positives after individual review.", - ModelFailurePossibleReason: true, - CWE: 287, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { - if !technicalAsset.OutOfScope && - (technicalAsset.Technology == model.IdentityStoreLDAP || technicalAsset.Technology == model.IdentityStoreDatabase) { - // everything fine, no risk, as we have an in-scope identity store in the model - return risks - } - } - // now check if we have enduser-identity authorized communication links, then it's a risk - riskIdentified := false - var mostRelevantAsset model.TechnicalAsset - impact := model.LowImpact - for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - for _, commLink := range technicalAsset.CommunicationLinksSorted() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset - if commLink.Authorization == 
model.EnduserIdentityPropagation { - riskIdentified = true - targetAsset := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId] - if impact == model.LowImpact { - mostRelevantAsset = targetAsset - if targetAsset.HighestConfidentiality() >= model.Confidential || - targetAsset.HighestIntegrity() >= model.Critical || - targetAsset.HighestAvailability() >= model.Critical { - impact = model.MediumImpact - } - } - if targetAsset.Confidentiality >= model.Confidential || - targetAsset.Integrity >= model.Critical || - targetAsset.Availability >= model.Critical { - impact = model.MediumImpact - } - // just for referencing the most interesting asset - if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() { - mostRelevantAsset = technicalAsset - } - } - } - } - if riskIdentified { - risks = append(risks, createRisk(mostRelevantAsset, impact)) - } - return risks -} - -func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk { - title := "Missing Identity Store in the threat model (referencing asset " + technicalAsset.Title + " as an example)" - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: title, - MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{}, - } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go b/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go deleted file mode 100644 index 495a3215..00000000 --- a/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go +++ /dev/null @@ -1,97 +0,0 @@ -package missing_network_segmentation - -import ( - "github.com/threagile/threagile/model" - "sort" -) - 
-const raaLimit = 50 - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-network-segmentation", - Title: "Missing Network Segmentation", - Description: "Highly sensitive assets and/or datastores residing in the same network segment than other " + - "lower sensitive assets (like webservers or content management systems etc.) should be better protected " + - "by a network segmentation trust-boundary.", - Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " + - "more valuable targets, as they are not separated by network segmentation.", - ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", - Action: "Network Segmentation", - Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive assets and/or datastores.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope technical assets with high sensitivity and RAA values as well as datastores " + - "when surrounded by assets (without a network trust-boundary in-between) which are of type " + model.ClientSystem.String() + ", " + - model.WebServer.String() + ", " + model.WebApplication.String() + ", " + model.CMS.String() + ", " + model.WebServiceREST.String() + ", " + model.WebServiceSOAP.String() + ", " + - model.BuildPipeline.String() + ", " + model.SourcecodeRepository.String() + ", " + model.Monitoring.String() + ", or similar and there is no direct connection between these " + - "(hence no requirement to be so close to each other).", - RiskAssessment: "Default is " + model.LowSeverity.String() + " risk. 
The risk is increased to " + model.MediumSeverity.String() + " when the asset missing the " + - "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".", - FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " + - "containing/processing highly sensitive data.", - ModelFailurePossibleReason: false, - CWE: 1008, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order - // range over them in sorted (hence re-producible) way: - keys := make([]string, 0) - for k, _ := range model.ParsedModelRoot.TechnicalAssets { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[key] - if !technicalAsset.OutOfScope && technicalAsset.Technology != model.ReverseProxy && technicalAsset.Technology != model.WAF && technicalAsset.Technology != model.IDS && technicalAsset.Technology != model.IPS && technicalAsset.Technology != model.ServiceRegistry { - if technicalAsset.RAA >= raaLimit && (technicalAsset.Type == model.Datastore || technicalAsset.Confidentiality >= model.Confidential || - technicalAsset.Integrity >= model.Critical || technicalAsset.Availability >= model.Critical) { - // now check for any other same-network assets of certain types which have no direct connection - for _, sparringAssetCandidateId := range keys { // so inner loop again over all assets - if technicalAsset.Id != sparringAssetCandidateId { - sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId] - if sparringAssetCandidate.Technology.IsLessProtectedType() && - 
technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) && - !technicalAsset.HasDirectConnection(sparringAssetCandidateId) && - !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() { - highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical || technicalAsset.Availability == model.MissionCritical - risks = append(risks, createRisk(technicalAsset, highRisk)) - break - } - } - } - } - } - } - return risks -} - -func createRisk(techAsset model.TechnicalAsset, moreRisky bool) model.Risk { - impact := model.LowImpact - if moreRisky { - impact = model.MediumImpact - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: "Missing Network Segmentation to further encapsulate and protect " + techAsset.Title + " against unrelated " + - "lower protected assets in the same network segment, which might be easier to compromise by attackers", - MostRelevantTechnicalAssetId: techAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{techAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id - return risk -} diff --git a/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go b/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go deleted file mode 100644 index 8ef10cdf..00000000 --- a/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go +++ /dev/null @@ -1,97 +0,0 @@ -package missing_vault_isolation - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "missing-vault-isolation", - Title: "Missing Vault Isolation", - Description: "Highly sensitive vault assets and their datastores should be isolated from other assets " + - "by their own network segmentation 
trust-boundary (" + model.ExecutionEnvironment.String() + " boundaries do not count as network isolation).", - Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " + - "highly sensitive vault assets and their datastores, as they are not separated by network segmentation.", - ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", - Action: "Network Segmentation", - Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their datastores.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope vault assets " + - "when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). " + - "This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).", - RiskAssessment: "Default is " + model.MediumImpact.String() + " impact. 
The impact is increased to " + model.HighImpact.String() + " when the asset missing the " + - "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".", - FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " + - "vaults with data of highest sensitivity.", - ModelFailurePossibleReason: false, - CWE: 1008, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets { - if !technicalAsset.OutOfScope && technicalAsset.Technology == model.Vault { - moreImpact := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical || - technicalAsset.Availability == model.MissionCritical - sameExecutionEnv := false - createRiskEntry := false - // now check for any other same-network assets of non-vault-related types - for sparringAssetCandidateId, _ := range model.ParsedModelRoot.TechnicalAssets { // so inner loop again over all assets - if technicalAsset.Id != sparringAssetCandidateId { - sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId] - if sparringAssetCandidate.Technology != model.Vault && !isVaultStorage(technicalAsset, sparringAssetCandidate) { - if technicalAsset.IsSameExecutionEnvironment(sparringAssetCandidateId) { - createRiskEntry = true - sameExecutionEnv = true - } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) { - createRiskEntry = true - } - } - } - } - if createRiskEntry { - risks = append(risks, createRisk(technicalAsset, moreImpact, sameExecutionEnv)) - } - } - } - return risks -} - -func isVaultStorage(vault model.TechnicalAsset, storage model.TechnicalAsset) bool { - return storage.Type == model.Datastore && 
vault.HasDirectConnection(storage.Id) -} - -func createRisk(techAsset model.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) model.Risk { - impact := model.MediumImpact - likelihood := model.Unlikely - others := "in the same network segment" - if moreImpact { - impact = model.HighImpact - } - if sameExecutionEnv { - likelihood = model.Likely - others = "in the same execution environment" - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(likelihood, impact), - ExploitationLikelihood: likelihood, - ExploitationImpact: impact, - Title: "Missing Vault Isolation to further encapsulate and protect vault-related asset " + techAsset.Title + " against unrelated " + - "lower protected assets " + others + ", which might be easier to compromise by attackers", - MostRelevantTechnicalAssetId: techAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{techAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id - return risk -} diff --git a/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go b/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go deleted file mode 100644 index 5cee3a3e..00000000 --- a/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go +++ /dev/null @@ -1,73 +0,0 @@ -package service_registry_poisoning - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "service-registry-poisoning", - Title: "Service Registry Poisoning", - Description: "When a service registry used for discovery of trusted service endpoints Service Registry Poisoning risks might arise.", - Impact: "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or " + - "malicious lookup and config data leading to breach of sensitive data.", - ASVS: "V10 - Malicious Code Verification 
Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", - Action: "Service Registry Integrity Check", - Mitigation: "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.Spoofing, - DetectionLogic: "In-scope service registries.", - RiskAssessment: "The risk rating depends on the sensitivity of the technical assets accessing the service registry " + - "as well as the data assets processed or stored.", - FalsePositives: "Service registries not used for service discovery " + - "can be considered as false positives after individual review.", - ModelFailurePossibleReason: false, - CWE: 693, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && technicalAsset.Technology == model.ServiceRegistry { - incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - risks = append(risks, createRisk(technicalAsset, incomingFlows)) - } - } - return risks -} - -func createRisk(technicalAsset model.TechnicalAsset, incomingFlows []model.CommunicationLink) model.Risk { - title := "Service Registry Poisoning risk at " + technicalAsset.Title + "" - impact := model.LowImpact - - for _, incomingFlow := range incomingFlows { - caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId] - if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical || technicalAsset.HighestAvailability() == model.MissionCritical || - caller.HighestConfidentiality() == 
model.StrictlyConfidential || caller.HighestIntegrity() == model.MissionCritical || caller.HighestAvailability() == model.MissionCritical || - incomingFlow.HighestConfidentiality() == model.StrictlyConfidential || incomingFlow.HighestIntegrity() == model.MissionCritical || incomingFlow.HighestAvailability() == model.MissionCritical { - impact = model.MediumImpact - break - } - } - - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: title, - MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: find all service-lookup-using tech assets, which then might use spoofed lookups? - } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go b/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go deleted file mode 100644 index cdf23c6b..00000000 --- a/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go +++ /dev/null @@ -1,89 +0,0 @@ -package unencrypted_asset - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "unencrypted-asset", - Title: "Unencrypted Technical Assets", - Description: "Due to the confidentiality rating of the technical asset itself and/or the processed data assets " + - "this technical asset must be encrypted. 
The risk rating depends on the sensitivity technical asset itself and of the data assets stored.", - Impact: "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.", - ASVS: "V6 - Stored Cryptography Verification Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html", - Action: "Encryption of Technical Asset", - Mitigation: "Apply encryption to the technical asset.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Operations, - STRIDE: model.InformationDisclosure, - DetectionLogic: "In-scope unencrypted technical assets (excluding " + model.ReverseProxy.String() + - ", " + model.LoadBalancer.String() + ", " + model.WAF.String() + ", " + model.IDS.String() + - ", " + model.IPS.String() + " and embedded components like " + model.Library.String() + ") " + - "storing data assets rated at least as " + model.Confidential.String() + " or " + model.Critical.String() + ". 
" + - "For technical assets storing data assets rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + " the " + - "encryption must be of type " + model.DataWithEnduserIndividualKey.String() + ".", - RiskAssessment: "Depending on the confidentiality rating of the stored data-assets either medium or high risk.", - FalsePositives: "When all sensitive data stored within the asset is already fully encrypted on document or data level.", - ModelFailurePossibleReason: false, - CWE: 311, - } -} - -func SupportedTags() []string { - return []string{} -} - -// check for technical assets that should be encrypted due to their confidentiality -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && !IsEncryptionWaiver(technicalAsset) && - (technicalAsset.HighestConfidentiality() >= model.Confidential || - technicalAsset.HighestIntegrity() >= model.Critical) { - verySensitive := technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || - technicalAsset.HighestIntegrity() == model.MissionCritical - requiresEnduserKey := verySensitive && technicalAsset.Technology.IsUsuallyStoringEnduserData() - if technicalAsset.Encryption == model.NoneEncryption { - impact := model.MediumImpact - if verySensitive { - impact = model.HighImpact - } - risks = append(risks, createRisk(technicalAsset, impact, requiresEnduserKey)) - } else if requiresEnduserKey && - (technicalAsset.Encryption == model.Transparent || technicalAsset.Encryption == model.DataWithSymmetricSharedKey || technicalAsset.Encryption == model.DataWithAsymmetricSharedKey) { - risks = append(risks, createRisk(technicalAsset, model.MediumImpact, requiresEnduserKey)) - } - } - } - return risks -} - -// Simple routing assets like 'Reverse Proxy' or 'Load Balancer' usually don't have their own storage and thus have no -// 
encryption requirement for the asset itself (though for the communication, but that's a different rule) -func IsEncryptionWaiver(asset model.TechnicalAsset) bool { - return asset.Technology == model.ReverseProxy || asset.Technology == model.LoadBalancer || - asset.Technology == model.WAF || asset.Technology == model.IDS || asset.Technology == model.IPS || - asset.Technology.IsEmbeddedComponent() -} - -func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact, requiresEnduserKey bool) model.Risk { - title := "Unencrypted Technical Asset named " + technicalAsset.Title + "" - if requiresEnduserKey { - title += " missing enduser-individual encryption with " + model.DataWithEnduserIndividualKey.String() - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Unlikely, impact), - ExploitationLikelihood: model.Unlikely, - ExploitationImpact: impact, - Title: title, - MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id - return risk -} diff --git a/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go b/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go deleted file mode 100644 index a6e00bec..00000000 --- a/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go +++ /dev/null @@ -1,105 +0,0 @@ -package unguarded_access_from_internet - -import ( - "github.com/threagile/threagile/model" - "sort" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "unguarded-access-from-internet", - Title: "Unguarded Access From Internet", - Description: "Internet-exposed assets must be guarded by a protecting service, application, " + - "or reverse-proxy.", - Impact: "If this risk is unmitigated, attackers might be able to directly attack 
sensitive systems without any hardening components in-between " + - "due to them being directly exposed on the internet.", - ASVS: "V1 - Architecture, Design and Threat Modeling Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", - Action: "Encapsulation of Technical Asset", - Mitigation: "Encapsulate the asset behind a guarding service, application, or reverse-proxy. " + - "For admin maintenance a bastion-host should be used as a jump-server. " + - "For file transfer a store-and-forward-host should be used as an indirect file exchange platform.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope technical assets (excluding " + model.LoadBalancer.String() + ") with confidentiality rating " + - "of " + model.Confidential.String() + " (or higher) or with integrity rating of " + model.Critical.String() + " (or higher) when " + - "accessed directly from the internet. All " + - model.WebServer.String() + ", " + model.WebApplication.String() + ", " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", and " + model.Gateway.String() + " assets are exempted from this risk when " + - "they do not consist of custom developed code and " + - "the data-flow only consists of HTTP or FTP protocols. Access from " + model.Monitoring.String() + " systems " + - "as well as VPN-protected connections are exempted.", - RiskAssessment: "The matching technical assets are at " + model.LowSeverity.String() + " risk. When either the " + - "confidentiality rating is " + model.StrictlyConfidential.String() + " or the integrity rating " + - "is " + model.MissionCritical.String() + ", the risk-rating is considered " + model.MediumSeverity.String() + ". 
" + - "For assets with RAA values higher than 40 % the risk-rating increases.", - FalsePositives: "When other means of filtering client requests are applied equivalent of " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", or " + model.Gateway.String() + " components.", - ModelFailurePossibleReason: false, - CWE: 501, - } -} - -func SupportedTags() []string { - return []string{} -} - -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope { - commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] - sort.Sort(model.ByTechnicalCommunicationLinkIdSort(commLinks)) - for _, incomingAccess := range commLinks { - if technicalAsset.Technology != model.LoadBalancer { - if !technicalAsset.CustomDevelopedParts { - if (technicalAsset.Technology == model.WebServer || technicalAsset.Technology == model.WebApplication || technicalAsset.Technology == model.ReverseProxy || technicalAsset.Technology == model.WAF || technicalAsset.Technology == model.Gateway) && - (incomingAccess.Protocol == model.HTTP || incomingAccess.Protocol == model.HTTPS) { - continue - } - if technicalAsset.Technology == model.Gateway && - (incomingAccess.Protocol == model.FTP || incomingAccess.Protocol == model.FTPS || incomingAccess.Protocol == model.SFTP) { - continue - } - } - if model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Technology == model.Monitoring || - incomingAccess.VPN { - continue - } - if technicalAsset.Confidentiality >= model.Confidential || technicalAsset.Integrity >= model.Critical { - sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId] - if sourceAsset.Internet { - highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical - risks = append(risks, createRisk(technicalAsset, 
incomingAccess, - model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk)) - } - } - } - } - } - } - return risks -} - -func createRisk(dataStore model.TechnicalAsset, dataFlow model.CommunicationLink, - clientFromInternet model.TechnicalAsset, moreRisky bool) model.Risk { - impact := model.LowImpact - if moreRisky || dataStore.RAA > 40 { - impact = model.MediumImpact - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.VeryLikely, impact), - ExploitationLikelihood: model.VeryLikely, - ExploitationImpact: impact, - Title: "Unguarded Access from Internet of " + dataStore.Title + " by " + - clientFromInternet.Title + "" + " via " + dataFlow.Title + "", - MostRelevantTechnicalAssetId: dataStore.Id, - MostRelevantCommunicationLinkId: dataFlow.Id, - DataBreachProbability: model.Possible, - DataBreachTechnicalAssetIDs: []string{dataStore.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + dataStore.Id + "@" + clientFromInternet.Id + "@" + dataFlow.Id - return risk -} diff --git a/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go b/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go deleted file mode 100644 index 5fa8f9e2..00000000 --- a/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go +++ /dev/null @@ -1,88 +0,0 @@ -package unguarded_direct_datastore_access - -import ( - "github.com/threagile/threagile/model" -) - -func Category() model.RiskCategory { - return model.RiskCategory{ - Id: "unguarded-direct-datastore-access", - Title: "Unguarded Direct Datastore Access", - Description: "Datastores accessed across trust boundaries must be guarded by some protecting service or application.", - Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive datastores without any protecting components in-between.", - ASVS: "V1 - Architecture, Design and Threat Modeling 
Requirements", - CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", - Action: "Encapsulation of Datastore", - Mitigation: "Encapsulate the datastore access behind a guarding service or application.", - Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", - Function: model.Architecture, - STRIDE: model.ElevationOfPrivilege, - DetectionLogic: "In-scope technical assets of type " + model.Datastore.String() + " (except " + model.IdentityStoreLDAP.String() + " when accessed from " + model.IdentityProvider.String() + " and " + model.FileServer.String() + " when accessed via file transfer protocols) with confidentiality rating " + - "of " + model.Confidential.String() + " (or higher) or with integrity rating of " + model.Critical.String() + " (or higher) " + - "which have incoming data-flows from assets outside across a network trust-boundary. DevOps config and deployment access is excluded from this risk.", // TODO new rule "missing bastion host"? - RiskAssessment: "The matching technical assets are at " + model.LowSeverity.String() + " risk. When either the " + - "confidentiality rating is " + model.StrictlyConfidential.String() + " or the integrity rating " + - "is " + model.MissionCritical.String() + ", the risk-rating is considered " + model.MediumSeverity.String() + ". 
" + - "For assets with RAA values higher than 40 % the risk-rating increases.", - FalsePositives: "When the caller is considered fully trusted as if it was part of the datastore itself.", - ModelFailurePossibleReason: false, - CWE: 501, - } -} - -func SupportedTags() []string { - return []string{} -} - -// check for datastores that should not be accessed directly across trust boundaries -func GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, id := range model.SortedTechnicalAssetIDs() { - technicalAsset := model.ParsedModelRoot.TechnicalAssets[id] - if !technicalAsset.OutOfScope && technicalAsset.Type == model.Datastore { - for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] { - sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId] - if (technicalAsset.Technology == model.IdentityStoreLDAP || technicalAsset.Technology == model.IdentityStoreDatabase) && - sourceAsset.Technology == model.IdentityProvider { - continue - } - if technicalAsset.Confidentiality >= model.Confidential || technicalAsset.Integrity >= model.Critical { - if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() && !FileServerAccessViaFTP(technicalAsset, incomingAccess) && - incomingAccess.Usage != model.DevOps && !model.IsSharingSameParentTrustBoundary(technicalAsset, sourceAsset) { - highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential || - technicalAsset.Integrity == model.MissionCritical - risks = append(risks, createRisk(technicalAsset, incomingAccess, - model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk)) - } - } - } - } - } - return risks -} - -func FileServerAccessViaFTP(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink) bool { - return technicalAsset.Technology == model.FileServer && - (incomingAccess.Protocol == model.FTP || incomingAccess.Protocol == model.FTPS || incomingAccess.Protocol == model.SFTP) -} - -func 
createRisk(dataStore model.TechnicalAsset, dataFlow model.CommunicationLink, clientOutsideTrustBoundary model.TechnicalAsset, moreRisky bool) model.Risk { - impact := model.LowImpact - if moreRisky || dataStore.RAA > 40 { - impact = model.MediumImpact - } - risk := model.Risk{ - Category: Category(), - Severity: model.CalculateSeverity(model.Likely, impact), - ExploitationLikelihood: model.Likely, - ExploitationImpact: impact, - Title: "Unguarded Direct Datastore Access of " + dataStore.Title + " by " + - clientOutsideTrustBoundary.Title + " via " + dataFlow.Title + "", - MostRelevantTechnicalAssetId: dataStore.Id, - MostRelevantCommunicationLinkId: dataFlow.Id, - DataBreachProbability: model.Improbable, - DataBreachTechnicalAssetIDs: []string{dataStore.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + dataFlow.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataStore.Id - return risk -} diff --git a/risks/custom/demo/demo-rule.go b/risks/custom/demo/demo-rule.go deleted file mode 100644 index 5eb8d672..00000000 --- a/risks/custom/demo/demo-rule.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "github.com/threagile/threagile/model" -) - -type customRiskRule string - -// exported as symbol (here simply as variable to interface to bundle many functions under one symbol) named "CustomRiskRule" -var CustomRiskRule customRiskRule - -func (r customRiskRule) Category() model.RiskCategory { - return model.RiskCategory{ - Id: "demo", - Title: "Just a Demo", - Description: "Demo Description", - Impact: "Demo Impact", - ASVS: "Demo ASVS", - CheatSheet: "https://example.com", - Action: "Demo Action", - Mitigation: "Demo Mitigation", - Check: "Demo Check", - Function: model.Development, - STRIDE: model.Tampering, - DetectionLogic: "Demo Detection", - RiskAssessment: "Demo Risk Assessment", - FalsePositives: "Demo False Positive.", - ModelFailurePossibleReason: false, - CWE: 0, - } -} - -func (r customRiskRule) SupportedTags() []string { - return 
[]string{"demo tag"} -} - -func (r customRiskRule) GenerateRisks() []model.Risk { - risks := make([]model.Risk, 0) - for _, techAsset := range model.ParsedModelRoot.TechnicalAssets { - risks = append(risks, createRisk(techAsset)) - } - return risks -} - -func createRisk(technicalAsset model.TechnicalAsset) model.Risk { - risk := model.Risk{ - Category: CustomRiskRule.Category(), - Severity: model.CalculateSeverity(model.VeryLikely, model.MediumImpact), - ExploitationLikelihood: model.VeryLikely, - ExploitationImpact: model.MediumImpact, - Title: "Demo risk at " + technicalAsset.Title + "", - MostRelevantTechnicalAssetId: technicalAsset.Id, - DataBreachProbability: model.Possible, - DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, - } - risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id - return risk -} diff --git a/support/render-data-asset-diagram.sh b/support/render-data-asset-diagram.sh deleted file mode 100755 index 0cd54739..00000000 --- a/support/render-data-asset-diagram.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -dot -Tpng $1 -o $2 \ No newline at end of file diff --git a/support/render-data-flow-diagram.sh b/support/render-data-flow-diagram.sh deleted file mode 100755 index 0cd54739..00000000 --- a/support/render-data-flow-diagram.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -dot -Tpng $1 -o $2 \ No newline at end of file diff --git a/support/schema.json b/support/schema.json index b2e5eb5e..bbbafcb2 100644 --- a/support/schema.json +++ b/support/schema.json @@ -32,6 +32,13 @@ "null" ] }, + "contact": { + "description": "Author contact info", + "type": [ + "string", + "null" + ] + }, "homepage": { "description": "Author homepage", "type": [ @@ -44,6 +51,43 @@ "name" ] }, + "contributors": { + "description": "Contributors to the model", + "type": [ + "array", + "null" + ], + "uniqueItems": true, + "items": { + "type": "object", + "properties": { + "name": { + "description": "Contributor name", + "type": [ + "string", + "null" + ] + }, + 
"contact": { + "description": "Contributor contact info", + "type": [ + "string", + "null" + ] + }, + "homepage": { + "description": "Contributor homepage", + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "name" + ] + } + }, "management_summary_comment": { "description": "Individual management summary for the report", "type": [ @@ -62,6 +106,27 @@ "mission-critical" ] }, + "application_description": { + "description": "General description of the application, its purpose and functionality.", + "type": "object", + "properties": { + "description": { + "description": "Application description for the report", + "type": [ + "string", + "null" + ] + }, + "images": { + "description": "Application images for the report", + "type": [ + "array", + "null" + ], + "uniqueItems": true + } + } + }, "business_overview": { "description": "Individual business overview for the report", "type": "object", @@ -470,7 +535,7 @@ "type": "boolean" }, "data_assets_processed": { - "description": "Data assets processed", + "description": "Data assets processed; ; all data assets stored or sent or received via a communication link (be it as a source or a target) are implicitly also processed and do not need to be listed here.", "type": [ "array", "null" @@ -624,7 +689,7 @@ "description": "VPN", "type": "boolean" }, - "ip_filtered": { + "ip_filtered": { "description": "IP filtered", "type": "boolean" }, @@ -680,9 +745,7 @@ "vpn", "ip_filtered", "readonly", - "usage", - "data_assets_sent", - "data_assets_received" + "usage" ] } } diff --git a/test/abuse_cases.yaml b/test/abuse_cases.yaml new file mode 100644 index 00000000..6315745b --- /dev/null +++ b/test/abuse_cases.yaml @@ -0,0 +1,30 @@ +abuse_cases: + Denial-of-Service: > + As a hacker I want to disturb the functionality of the backend system in order to cause indirect + financial damage via unusable features. 
+ CPU-Cycle Theft: > + As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners. + Ransomware: > + As a hacker I want to encrypt the storage and file systems in order to demand ransom. + Identity Theft: > + As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside. + PII Theft: > + As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage + their repudiation by publishing them. + + ERP-System Compromise: > + As a hacker I want to access the ERP-System in order to steal/modify sensitive business data. + Database Compromise: > + As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive + business data. + Contract Filesystem Compromise: > + As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data. + Cross-Site Scripting Attacks: > + As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and + cause reputational damage. + Denial-of-Service of Enduser Functionality: > + As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial + damage (lower sales). + Denial-of-Service of ERP/DB Functionality: > + As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect + financial damage via unusable internal ERP features (not related to customer portal). 
diff --git a/test/all.json b/test/all.json new file mode 100644 index 00000000..42123fa5 --- /dev/null +++ b/test/all.json @@ -0,0 +1,2807 @@ +{ + "author": { + "name": "John Doe", + "homepage": "www.example.com" + }, + "title": "Some Example Application", + "date": "2020-07-01T00:00:00Z", + "management_summary_comment": "Just some \u003cb\u003emore\u003c/b\u003e custom summary possible here...\n", + "business_overview": { + "description": "Some more \u003ci\u003edemo text\u003c/i\u003e here and even images..." + }, + "technical_overview": { + "description": "Some more \u003ci\u003edemo text\u003c/i\u003e here and even images..." + }, + "business_criticality": "important", + "security_requirements": { + "EU-DSGVO": "Mandatory EU-Datenschutzgrundverordnung", + "Input Validation": "Strict input validation is required to reduce the overall attack surface.", + "Securing Administrative Access": "Administrative access must be secured with strong encryption and multi-factor authentication." + }, + "questions": { + "How are the admin clients managed/protected against compromise?": "", + "How are the build pipeline components managed/protected against compromise?": "Managed by XYZ\n", + "How are the development clients managed/protected against compromise?": "Managed by XYZ\n" + }, + "abuse_cases": { + "CPU-Cycle Theft": "As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners.\n", + "Contract Filesystem Compromise": "As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data.\n", + "Cross-Site Scripting Attacks": "As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and cause reputational damage.\n", + "Database Compromise": "As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive business data.\n", + "Denial-of-Service": "As a hacker I want to disturb the 
functionality of the backend system in order to cause indirect financial damage via unusable features.\n", + "Denial-of-Service of ERP/DB Functionality": "As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect financial damage via unusable internal ERP features (not related to customer portal).\n", + "Denial-of-Service of Enduser Functionality": "As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial damage (lower sales).\n", + "ERP-System Compromise": "As a hacker I want to access the ERP-System in order to steal/modify sensitive business data.\n", + "Identity Theft": "As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside.\n", + "PII Theft": "As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage their repudiation by publishing them.\n", + "Ransomware": "As a hacker I want to encrypt the storage and file systems in order to demand ransom.\n" + }, + "tags_available": [ + "linux", + "apache", + "mysql", + "jboss", + "keycloak", + "jenkins", + "git", + "oracle", + "some-erp", + "vmware", + "aws", + "aws:ec2", + "aws:s3" + ], + "data_assets": { + "build-job-config": { + "id": "build-job-config", + "title": "Build Job Config", + "description": "Data for customizing of the build job system.", + "usage": "devops", + "origin": "Company XYZ", + "owner": "Company XYZ", + "confidentiality": "restricted", + "integrity": "critical", + "availability": "operational", + "justification_cia_rating": "Data for customizing of the build job system.\n" + }, + "client-application-code": { + "id": "client-application-code", + "title": "Client Application Code", + "description": "Angular and other client-side code delivered by the application.", + "usage": "devops", + "origin": "Company ABC", + "owner": "Company ABC", + 
"integrity": "critical", + "availability": "important", + "justification_cia_rating": "The integrity of the public data is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n" + }, + "contract-summaries": { + "id": "contract-summaries", + "title": "Customer Contract Summaries", + "description": "Customer Contract Summaries", + "origin": "Customer", + "owner": "Company XYZ", + "confidentiality": "restricted", + "integrity": "operational", + "availability": "operational", + "justification_cia_rating": "Just some summaries.\n" + }, + "customer-accounts": { + "id": "customer-accounts", + "title": "Customer Accounts", + "description": "Customer Accounts (including transient credentials when entered for checking them)", + "origin": "Customer", + "owner": "Company XYZ", + "quantity": "many", + "confidentiality": "strictly-confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "Customer account data for using the portal are required to be available to offer the portal functionality.\n" + }, + "customer-contracts": { + "id": "customer-contracts", + "title": "Customer Contracts", + "description": "Customer Contracts (PDF)", + "origin": "Customer", + "owner": "Company XYZ", + "quantity": "many", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "operational", + "justification_cia_rating": "Contract data might contain financial data as well as personally identifiable information (PII). 
The integrity and availability of contract data is required for clearing payment disputes.\n" + }, + "customer-operational-data": { + "id": "customer-operational-data", + "title": "Customer Operational Data", + "description": "Customer Operational Data", + "origin": "Customer", + "owner": "Company XYZ", + "quantity": "many", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "Customer operational data for using the portal are required to be available to offer the portal functionality and are used in the backend transactions.\n" + }, + "db-dumps": { + "id": "db-dumps", + "title": "Database Customizing and Dumps", + "description": "Data for customizing of the DB system, which might include full database dumps.", + "usage": "devops", + "tags": [ + "oracle" + ], + "origin": "Company XYZ", + "owner": "Company XYZ", + "confidentiality": "strictly-confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "Data for customizing of the DB system, which might include full database dumps.\n" + }, + "erp-customizing": { + "id": "erp-customizing", + "title": "ERP Customizing Data", + "description": "Data for customizing of the ERP system.", + "usage": "devops", + "origin": "Company XYZ", + "owner": "Company XYZ", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "Data for customizing of the ERP system.\n" + }, + "erp-logs": { + "id": "erp-logs", + "title": "ERP Logs", + "description": "Logs generated by the ERP system.", + "usage": "devops", + "origin": "Company XYZ", + "owner": "Company XYZ", + "quantity": "many", + "confidentiality": "restricted", + "justification_cia_rating": "Logs should not contain PII data and are only required for failure analysis, i.e. 
they are not considered as hard transactional logs.\n" + }, + "internal-business-data": { + "id": "internal-business-data", + "title": "Some Internal Business Data", + "description": "Internal business data of the ERP system used unrelated to the customer-facing processes.", + "origin": "Company XYZ", + "owner": "Company XYZ", + "quantity": "few", + "confidentiality": "strictly-confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for internal non-customer-portal-related stuff).\n" + }, + "marketing-material": { + "id": "marketing-material", + "title": "Marketing Material", + "description": "Website and marketing data to inform potential customers and generate new leads.", + "usage": "devops", + "origin": "Company ABC", + "owner": "Company ABC", + "integrity": "important", + "availability": "important", + "justification_cia_rating": "The integrity of the public data is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n" + }, + "server-application-code": { + "id": "server-application-code", + "title": "Server Application Code", + "description": "API and other server-side code of the application.", + "usage": "devops", + "origin": "Company ABC", + "owner": "Company ABC", + "confidentiality": "internal", + "integrity": "mission-critical", + "availability": "important", + "justification_cia_rating": "The integrity of the API code is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n" + } + }, + "technical_assets": { + "apache-webserver": { + "id": "apache-webserver", + "title": "Apache Webserver", + "description": "Apache Webserver hosting the API code and client-side code", + 
"type": "process", + "size": "application", + "technology": "web-server", + "machine": "container", + "custom_developed_parts": true, + "owner": "Company ABC", + "confidentiality": "internal", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.\n", + "tags": [ + "linux", + "apache", + "aws:ec2" + ], + "data_assets_processed": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data", + "client-application-code", + "server-application-code" + ], + "data_assets_stored": [ + "client-application-code", + "server-application-code" + ], + "data_formats_accepted": [ + "json", + "file" + ], + "communication_links": [ + { + "id": "apache-webserver\u003eerp-system-traffic", + "source_id": "apache-webserver", + "target_id": "erp-system", + "title": "ERP System Traffic", + "description": "Link to the ERP system", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "apache-webserver\u003eauth-credential-check-traffic", + "source_id": "apache-webserver", + "target_id": "identity-provider", + "title": "Auth Credential Check Traffic", + "description": "Link to the identity provider server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "backend-admin-client": { + "id": "backend-admin-client", + "title": "Backend Admin Client", + "description": "Backend 
admin client", + "usage": "devops", + "size": "component", + "technology": "browser", + "out_of_scope": true, + "used_as_client_by_human": true, + "justification_out_of_scope": "Owned and managed by ops provider", + "owner": "Company XYZ", + "confidentiality": "internal", + "integrity": "operational", + "availability": "operational", + "justification_cia_rating": "The client used by Company XYZ to administer the system.\n", + "data_assets_processed": [ + "erp-logs" + ], + "communication_links": [ + { + "id": "backend-admin-client\u003euser-management-access", + "source_id": "backend-admin-client", + "target_id": "ldap-auth-server", + "title": "User Management Access", + "description": "Link to the LDAP auth server for managing users", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backend-admin-client\u003eerp-web-access", + "source_id": "backend-admin-client", + "target_id": "erp-system", + "title": "ERP Web Access", + "description": "Link to the ERP system (Web)", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "erp-customizing" + ], + "data_assets_received": [ + "erp-logs" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backend-admin-client\u003edb-update-access", + "source_id": "backend-admin-client", + "target_id": "sql-database", + "title": "DB Update Access", + "description": "Link to the database (JDBC tunneled via SSH)", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "db-dumps" + ], + "data_assets_received": [ + "db-dumps", + "erp-logs", + "customer-accounts", + 
"customer-operational-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "backoffice-client": { + "id": "backoffice-client", + "title": "Backoffice Client", + "description": "Backoffice client", + "size": "component", + "technology": "desktop", + "out_of_scope": true, + "used_as_client_by_human": true, + "justification_out_of_scope": "Owned and managed by Company XYZ company", + "owner": "Company XYZ", + "confidentiality": "confidential", + "integrity": "important", + "availability": "important", + "justification_cia_rating": "The client used by Company XYZ to administer and use the system.\n", + "data_assets_processed": [ + "customer-contracts", + "internal-business-data", + "erp-logs" + ], + "communication_links": [ + { + "id": "backoffice-client\u003eerp-internal-access", + "source_id": "backoffice-client", + "target_id": "erp-system", + "title": "ERP Internal Access", + "description": "Link to the ERP system", + "protocol": "https", + "tags": [ + "some-erp" + ], + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "internal-business-data" + ], + "data_assets_received": [ + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backoffice-client\u003emarketing-cms-editing", + "source_id": "backoffice-client", + "target_id": "marketing-cms", + "title": "Marketing CMS Editing", + "description": "Link to the CMS for editing content", + "protocol": "https", + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "marketing-material" + ], + "data_assets_received": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "contract-fileserver": { + "id": "contract-fileserver", + "title": "Contract Fileserver", + "description": "NFS Filesystem for storing the contract 
PDFs", + "type": "datastore", + "size": "component", + "technology": "file-server", + "machine": "virtual", + "owner": "Company ABC", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "important", + "justification_cia_rating": "Contract data might contain financial data as well as personally identifiable information (PII). The integrity and availability of contract data is required for clearing payment disputes. The filesystem is also required to be available for storing new contracts of freshly generated customers.\n", + "tags": [ + "linux", + "aws:s3" + ], + "data_assets_stored": [ + "customer-contracts", + "contract-summaries" + ], + "data_formats_accepted": [ + "file" + ] + }, + "customer-client": { + "id": "customer-client", + "title": "Customer Web Client", + "description": "Customer Web Client", + "size": "component", + "technology": "browser", + "internet": true, + "out_of_scope": true, + "used_as_client_by_human": true, + "justification_out_of_scope": "Owned and managed by enduser customer", + "owner": "Customer", + "confidentiality": "internal", + "integrity": "operational", + "availability": "operational", + "justification_cia_rating": "The client used by the customer to access the system.\n", + "data_assets_processed": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code", + "marketing-material" + ], + "communication_links": [ + { + "id": "customer-client\u003ecustomer-traffic", + "source_id": "customer-client", + "target_id": "load-balancer", + "title": "Customer Traffic", + "description": "Link to the load balancer", + "protocol": "https", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code", + "marketing-material" + ], + 
"diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "erp-system": { + "id": "erp-system", + "title": "Backoffice ERP System", + "description": "ERP system", + "type": "process", + "technology": "erp", + "machine": "virtual", + "redundant": true, + "owner": "Company ABC", + "confidentiality": "strictly-confidential", + "integrity": "mission-critical", + "availability": "mission-critical", + "justification_cia_rating": "The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other Company XYZ internal processes.\n", + "tags": [ + "linux" + ], + "data_assets_processed": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data", + "erp-customizing" + ], + "data_assets_stored": [ + "erp-logs" + ], + "data_formats_accepted": [ + "xml", + "file", + "serialization" + ], + "communication_links": [ + { + "id": "erp-system\u003edatabase-traffic", + "source_id": "erp-system", + "target_id": "sql-database", + "title": "Database Traffic", + "description": "Link to the DB system", + "protocol": "jdbc", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "erp-system\u003enfs-filesystem-access", + "source_id": "erp-system", + "target_id": "contract-fileserver", + "title": "NFS Filesystem Access", + "description": "Link to the file system", + "protocol": "nfs", + "data_assets_sent": [ + "customer-contracts" + ], + "data_assets_received": [ + "customer-contracts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "external-dev-client": { + "id": "external-dev-client", + "title": "External Development Client", 
+ "description": "External developer client", + "usage": "devops", + "technology": "devops-client", + "internet": true, + "multi_tenant": true, + "out_of_scope": true, + "used_as_client_by_human": true, + "justification_out_of_scope": "Owned and managed by external developers", + "owner": "External Developers", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "operational", + "justification_cia_rating": "The clients used by external developers to create parts of the application code.\n", + "tags": [ + "linux" + ], + "data_assets_processed": [ + "client-application-code", + "server-application-code" + ], + "data_assets_stored": [ + "client-application-code", + "server-application-code" + ], + "data_formats_accepted": [ + "file" + ], + "communication_links": [ + { + "id": "external-dev-client\u003egit-repo-code-write-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Code Write Access", + "description": "Link to the Git repo", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "external-dev-client\u003egit-repo-web-ui-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Web-UI Access", + "description": "Link to the Git repo", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": 
"external-dev-client\u003ejenkins-web-ui-access", + "source_id": "external-dev-client", + "target_id": "jenkins-buildserver", + "title": "Jenkins Web-UI Access", + "description": "Link to the Jenkins build server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "build-job-config" + ], + "data_assets_received": [ + "build-job-config" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "git-repo": { + "id": "git-repo", + "title": "Git Repository", + "description": "Git repository server", + "usage": "devops", + "type": "process", + "technology": "sourcecode-repository", + "machine": "virtual", + "multi_tenant": true, + "owner": "Company ABC", + "confidentiality": "confidential", + "integrity": "important", + "availability": "important", + "justification_cia_rating": "The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. 
and is therefore rated as confidential.\n", + "tags": [ + "linux", + "git" + ], + "data_assets_processed": [ + "client-application-code", + "server-application-code" + ], + "data_assets_stored": [ + "client-application-code", + "server-application-code" + ], + "data_formats_accepted": [ + "file" + ] + }, + "identity-provider": { + "id": "identity-provider", + "title": "Identity Provider", + "description": "Identity provider server", + "type": "process", + "size": "component", + "technology": "identity-provider", + "machine": "virtual", + "owner": "Company ABC", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "The auth data of the application\n", + "tags": [ + "linux", + "jboss", + "keycloak" + ], + "data_assets_processed": [ + "customer-accounts" + ], + "communication_links": [ + { + "id": "identity-provider\u003eldap-credential-check-traffic", + "source_id": "identity-provider", + "target_id": "ldap-auth-server", + "title": "LDAP Credential Check Traffic", + "description": "Link to the LDAP server", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "jenkins-buildserver": { + "id": "jenkins-buildserver", + "title": "Jenkins Buildserver", + "description": "Jenkins buildserver", + "usage": "devops", + "type": "process", + "technology": "build-pipeline", + "machine": "virtual", + "multi_tenant": true, + "owner": "Company ABC", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "important", + "justification_cia_rating": "The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is therefore rated as confidential. 
The integrity and availability is rated as critical and important due to the risk of reputation damage and application update unavailability when the build pipeline is compromised.\n", + "tags": [ + "linux", + "jenkins" + ], + "data_assets_processed": [ + "build-job-config", + "client-application-code", + "server-application-code", + "marketing-material" + ], + "data_assets_stored": [ + "build-job-config", + "client-application-code", + "server-application-code", + "marketing-material" + ], + "data_formats_accepted": [ + "file", + "serialization" + ], + "communication_links": [ + { + "id": "jenkins-buildserver\u003egit-repo-code-read-access", + "source_id": "jenkins-buildserver", + "target_id": "git-repo", + "title": "Git Repo Code Read Access", + "description": "Link to the Git repository server", + "protocol": "ssh", + "readonly": true, + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "jenkins-buildserver\u003eapplication-deployment", + "source_id": "jenkins-buildserver", + "target_id": "apache-webserver", + "title": "Application Deployment", + "description": "Link to the Apache webserver", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "jenkins-buildserver\u003ecms-updates", + "source_id": "jenkins-buildserver", + "target_id": "marketing-cms", + "title": "CMS Updates", + "description": "Link to the CMS", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + 
"diagram_tweak_constraint": true + } + ] + }, + "ldap-auth-server": { + "id": "ldap-auth-server", + "title": "LDAP Auth Server", + "description": "LDAP authentication server", + "type": "datastore", + "size": "component", + "technology": "identity-store-ldap", + "encryption": "transparent", + "owner": "Company ABC", + "confidentiality": "confidential", + "integrity": "critical", + "availability": "critical", + "justification_cia_rating": "The auth data of the application\n", + "tags": [ + "linux" + ], + "data_assets_processed": [ + "customer-accounts" + ], + "data_assets_stored": [ + "customer-accounts" + ] + }, + "load-balancer": { + "id": "load-balancer", + "title": "Load Balancer", + "description": "Load Balancer (HA-Proxy)", + "type": "process", + "size": "component", + "technology": "load-balancer", + "owner": "Company ABC", + "confidentiality": "internal", + "integrity": "mission-critical", + "availability": "mission-critical", + "justification_cia_rating": "The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ usages of the portal and ERP system.\n", + "data_assets_processed": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data", + "client-application-code", + "marketing-material" + ], + "communication_links": [ + { + "id": "load-balancer\u003eweb-application-traffic", + "source_id": "load-balancer", + "target_id": "apache-webserver", + "title": "Web Application Traffic", + "description": "Link to the web server", + "protocol": "http", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": 
"load-balancer\u003ecms-content-traffic", + "source_id": "load-balancer", + "target_id": "marketing-cms", + "title": "CMS Content Traffic", + "description": "Link to the CMS server", + "protocol": "http", + "readonly": true, + "data_assets_received": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "marketing-cms": { + "id": "marketing-cms", + "title": "Marketing CMS", + "description": "CMS for the marketing content", + "type": "process", + "size": "application", + "technology": "cms", + "machine": "container", + "custom_developed_parts": true, + "owner": "Company ABC", + "confidentiality": "internal", + "integrity": "important", + "availability": "important", + "justification_cia_rating": "The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.\n", + "tags": [ + "linux" + ], + "data_assets_processed": [ + "marketing-material", + "customer-accounts" + ], + "data_assets_stored": [ + "marketing-material" + ], + "communication_links": [ + { + "id": "marketing-cms\u003eauth-traffic", + "source_id": "marketing-cms", + "target_id": "ldap-auth-server", + "title": "Auth Traffic", + "description": "Link to the LDAP auth server", + "protocol": "ldap", + "readonly": true, + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + "sql-database": { + "id": "sql-database", + "title": "Customer Contract Database", + "description": "The database behind the ERP system", + "type": "datastore", + "size": "component", + "technology": "database", + "machine": "virtual", + "encryption": "data-with-symmetric-shared-key", + "owner": "Company ABC", + "confidentiality": "strictly-confidential", + "integrity": "mission-critical", + "availability": "mission-critical", + 
"justification_cia_rating": "The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also for other Company XYZ internal processes.\n", + "tags": [ + "linux", + "mysql" + ], + "data_assets_processed": [ + "db-dumps" + ], + "data_assets_stored": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ] + } + }, + "trust_boundaries": { + "application-network": { + "id": "application-network", + "title": "Application Network", + "description": "Application Network", + "type": "network-cloud-provider", + "tags": [ + "aws" + ], + "technical_assets_inside": [ + "load-balancer" + ], + "trust_boundaries_nested": [ + "web-dmz", + "erp-dmz", + "auth-env" + ] + }, + "auth-env": { + "id": "auth-env", + "title": "Auth Handling Environment", + "description": "Auth Handling Environment", + "type": "execution-environment", + "technical_assets_inside": [ + "identity-provider", + "ldap-auth-server" + ] + }, + "dev-network": { + "id": "dev-network", + "title": "Dev Network", + "description": "Development Network", + "technical_assets_inside": [ + "jenkins-buildserver", + "git-repo", + "backend-admin-client", + "backoffice-client" + ] + }, + "erp-dmz": { + "id": "erp-dmz", + "title": "ERP DMZ", + "description": "ERP DMZ", + "type": "network-cloud-security-group", + "tags": [ + "some-erp" + ], + "technical_assets_inside": [ + "erp-system", + "contract-fileserver", + "sql-database" + ] + }, + "web-dmz": { + "id": "web-dmz", + "title": "Web DMZ", + "description": "Web DMZ", + "type": "network-cloud-security-group", + "technical_assets_inside": [ + "apache-webserver", + "marketing-cms" + ] + } + }, + "shared_runtimes": { + "webapp-virtualization": { + "id": "webapp-virtualization", + "title": "WebApp and Backoffice Virtualization", + "description": "WebApp Virtualization", + "tags": [ + "vmware" + ], + "technical_assets_running": [ + "apache-webserver", + "marketing-cms", + "erp-system", + 
"contract-fileserver", + "sql-database" + ] + } + }, + "individual_risk_categories": { + "something-strange": { + "id": "something-strange", + "title": "Some Individual Risk Example", + "description": "Some text describing the risk category...", + "impact": "Some text describing the impact...", + "asvs": "V0 - Something Strange", + "cheat_sheet": "https://example.com", + "action": "Some text describing the action...", + "mitigation": "Some text describing the mitigation...", + "check": "Check if XYZ...", + "detection_logic": "Some text describing the detection logic...", + "risk_assessment": "Some text describing the risk assessment...", + "false_positives": "Some text describing the most common types of false positives...", + "stride": "repudiation", + "cwe": 693 + } + }, + "built_in_risk_categories": { + "accidental-secret-leak": { + "id": "accidental-secret-leak", + "title": "Accidental Secret Leak", + "description": "Sourcecode repositories (including their histories) as well as artifact registries can accidentally contain secrets like checked-in or packaged-in passwords, API tokens, certificates, crypto keys, etc.", + "impact": "If this risk is unmitigated, attackers which have access to affected sourcecode repositories or artifact registries might find secrets accidentally checked-in.", + "asvs": "V14 - Configuration Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Build Pipeline Hardening", + "mitigation": "Establish measures preventing accidental check-in or package-in of secrets into sourcecode repositories and artifact registries. This starts by using good .gitignore and .dockerignore files, but does not stop there. See for example tools like \u003ci\u003e\"git-secrets\" or \"Talisman\"\u003c/i\u003e to have check-in preventive measures for secrets. 
Consider also to regularly scan your repositories for secrets accidentally checked-in using scanning tools like \u003ci\u003e\"gitleaks\" or \"gitrob\"\u003c/i\u003e.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope sourcecode repositories and artifact registries.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Usually no false positives.", + "function": "operations", + "stride": "information-disclosure", + "cwe": 200 + }, + "code-backdooring": { + "id": "code-backdooring", + "title": "Code Backdooring", + "description": "For each build-pipeline component Code Backdooring risks might arise where attackers compromise the build-pipeline in order to let backdoored artifacts be shipped into production. Aside from direct code backdooring this includes backdooring of dependencies and even of more lower-level build infrastructure, like backdooring compilers (similar to what the XcodeGhost malware did) or dependencies.", + "impact": "If this risk remains unmitigated, attackers might be able to execute code on and completely takeover production environments.", + "asvs": "V10 - Malicious Code Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html", + "action": "Build Pipeline Hardening", + "mitigation": "Reduce the attack surface of backdooring the build pipeline by not directly exposing the build pipeline components on the public internet and also not exposing it in front of unmanaged (out-of-scope) developer clients. Also consider the use of code signing to prevent code modifications.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope development relevant technical assets which are either accessed by 
out-of-scope unmanaged developer clients and/or are directly accessed by any kind of internet-located (non-VPN) component or are themselves directly located on the internet.", + "risk_assessment": "The risk rating depends on the confidentiality and integrity rating of the code being handled and deployed as well as the placement/calling of this technical asset on/from the internet.", + "false_positives": "When the build-pipeline and sourcecode-repo is not exposed to the internet and considered fully trusted (which implies that all accessing clients are also considered fully trusted in terms of their patch management and applied hardening, which must be equivalent to a managed developer client environment) this can be considered a false positive after individual review.", + "function": "operations", + "stride": "tampering", + "cwe": 912 + }, + "container-baseimage-backdooring": { + "id": "container-baseimage-backdooring", + "title": "Container Base Image Backdooring", + "description": "When a technical asset is built using container technologies, Base Image Backdooring risks might arise where base images and other layers used contain vulnerable components or backdoors.\u003cbr\u003e\u003cbr\u003eSee for example: \u003ca href=\"https://techcrunch.com/2018/06/15/tainted-crypto-mining-containers-pulled-from-docker-hub/\"\u003ehttps://techcrunch.com/2018/06/15/tainted-crypto-mining-containers-pulled-from-docker-hub/\u003c/a\u003e", + "impact": "If this risk is unmitigated, attackers might be able to deeply persist in the target system by executing code in deployed containers.", + "asvs": "V10 - Malicious Code Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html", + "action": "Container Infrastructure Hardening", + "mitigation": "Apply hardening of all container infrastructures (see for example the \u003ci\u003eCIS-Benchmarks for Docker and Kubernetes\u003c/i\u003e and the \u003ci\u003eDocker Bench 
for Security\u003c/i\u003e). Use only trusted base images of the original vendors, verify digital signatures and apply image creation best practices. Also consider using Google's \u003ci\u003eDistroless\u003c/i\u003e base images or otherwise very small base images. Regularly execute container image scans with tools checking the layers for vulnerable components.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS/CSVS applied?", + "detection_logic": "In-scope technical assets running as containers.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets.", + "false_positives": "Fully trusted (i.e. reviewed and cryptographically signed or similar) base images of containers can be considered as false positives after individual review.", + "function": "operations", + "stride": "tampering", + "cwe": 912 + }, + "container-platform-escape": { + "id": "container-platform-escape", + "title": "Container Platform Escape", + "description": "Container platforms are especially interesting targets for attackers as they host big parts of a containerized runtime infrastructure. When not configured and operated with security best practices in mind, attackers might exploit a vulnerability inside an container and escape towards the platform as highly privileged users. 
These scenarios might give attackers capabilities to attack every other container as owning the container platform (via container escape attacks) equals to owning every container.", + "impact": "If this risk is unmitigated, attackers which have successfully compromised a container (via other vulnerabilities) might be able to deeply persist in the target system by executing code in many deployed containers and the container platform itself.", + "asvs": "V14 - Configuration Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html", + "action": "Container Infrastructure Hardening", + "mitigation": "Apply hardening of all container infrastructures. \u003cp\u003eSee for example the \u003ci\u003eCIS-Benchmarks for Docker and Kubernetes\u003c/i\u003e as well as the \u003ci\u003eDocker Bench for Security\u003c/i\u003e ( \u003ca href=\"https://github.com/docker/docker-bench-security\"\u003ehttps://github.com/docker/docker-bench-security\u003c/a\u003e ) or \u003ci\u003eInSpec Checks for Docker and Kubernetes\u003c/i\u003e ( \u003ca href=\"https://github.com/dev-sec/cis-docker-benchmark\"\u003ehttps://github.com/dev-sec/cis-docker-benchmark\u003c/a\u003e and \u003ca href=\"https://github.com/dev-sec/cis-kubernetes-benchmark\"\u003ehttps://github.com/dev-sec/cis-kubernetes-benchmark\u003c/a\u003e ). Use only trusted base images, verify digital signatures and apply image creation best practices. Also consider using Google's \u003ci\u003eDistroless\u003c/i\u003e base images or otherwise very small base images. 
Apply namespace isolation and node affinity to separate pods from each other in terms of access and nodes the same style as you separate data.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS or CSVS chapter applied?", + "detection_logic": "In-scope container platforms.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Container platforms not running parts of the target architecture can be considered as false positives after individual review.", + "function": "operations", + "stride": "elevation-of-privilege", + "cwe": 1008 + }, + "cross-site-request-forgery": { + "id": "cross-site-request-forgery", + "title": "Cross-Site Request Forgery (CSRF)", + "description": "When a web application is accessed via web protocols Cross-Site Request Forgery (CSRF) risks might arise.", + "impact": "If this risk remains unmitigated, attackers might be able to trick logged-in victim users into unwanted actions within the web application by visiting an attacker controlled web site.", + "asvs": "V4 - Access Control Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html", + "action": "CSRF Prevention", + "mitigation": "Try to use anti-CSRF tokens or the double-submit patterns (at least for logged-in requests). When your authentication scheme depends on cookies (like session or token cookies), consider marking them with the same-site flag. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope web applications accessed via typical web access protocols.", + "risk_assessment": "The risk rating depends on the integrity rating of the data sent across the communication link.", + "false_positives": "Web applications passing the authentication state via custom headers instead of cookies can eventually be false positives. Also when the web application is not accessed via a browser-like component (i.e not by a human user initiating the request that gets passed through all components until it reaches the web application) this can be considered a false positive.", + "function": "development", + "cwe": 352 + }, + "cross-site-scripting": { + "id": "cross-site-scripting", + "title": "Cross-Site Scripting (XSS)", + "description": "For each web application Cross-Site Scripting (XSS) risks might arise. In terms of the overall risk level take other applications running on the same domain into account as well.", + "impact": "If this risk remains unmitigated, attackers might be able to access individual victim sessions and steal or modify user data.", + "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html", + "action": "XSS Prevention", + "mitigation": "Try to encode all values sent back to the browser and also handle DOM-manipulations in a safe way to avoid DOM-based XSS. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope web applications.", + "risk_assessment": "The risk rating depends on the sensitivity of the data processed or stored in the web application.", + "false_positives": "When the technical asset is not accessed via a browser-like component (i.e not by a human user initiating the request that gets passed through all components until it reaches the web application) this can be considered a false positive.", + "function": "development", + "stride": "tampering", + "cwe": 79 + }, + "dos-risky-access-across-trust-boundary": { + "id": "dos-risky-access-across-trust-boundary", + "title": "DoS-risky Access Across Trust-Boundary", + "description": "Assets accessed across trust boundaries with critical or mission-critical availability rating are more prone to Denial-of-Service (DoS) risks.", + "impact": "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html", + "action": "Anti-DoS Measures", + "mitigation": "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. 
Generally applying redundancy on the targeted technical asset reduces the risk of DoS.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets (excluding load-balancer) with availability rating of critical or higher which have incoming data-flows across a network trust-boundary (excluding devops usage).", + "risk_assessment": "Matching technical assets with availability rating of critical or higher are at low risk. When the availability rating is mission-critical and neither a VPN nor IP filter for the incoming data-flow nor redundancy for the asset is applied, the risk-rating is considered medium.", + "false_positives": "When the accessed target operations are not time- or resource-consuming.", + "function": "operations", + "stride": "denial-of-service", + "cwe": 400 + }, + "incomplete-model": { + "id": "incomplete-model", + "title": "Incomplete Model", + "description": "When the threat model contains unknown technologies or transfers data over unknown protocols, this is an indicator for an incomplete model.", + "impact": "If this risk is unmitigated, other risks might not be noticed as the model is incomplete.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html", + "action": "Threat Modeling Completeness", + "mitigation": "Try to find out what technology or protocol is used instead of specifying that it is unknown.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "All technical assets and communication links with technology type or protocol type specified as unknown.", + "risk_assessment": "low", + "false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "information-disclosure", + 
"model_failure_possible_reason": true, + "cwe": 1008 + }, + "ldap-injection": { + "id": "ldap-injection", + "title": "LDAP-Injection", + "description": "When an LDAP server is accessed LDAP-Injection risks might arise. The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.", + "impact": "If this risk remains unmitigated, attackers might be able to modify LDAP queries and access more data from the LDAP server than allowed.", + "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html", + "action": "LDAP-Injection Prevention", + "mitigation": "Try to use libraries that properly encode LDAP meta characters in searches and queries to access the LDAP sever in order to stay safe from LDAP-Injection vulnerabilities. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope clients accessing LDAP servers via typical LDAP access protocols.", + "risk_assessment": "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.", + "false_positives": "LDAP server queries by search values not consisting of parts controllable by the caller can be considered as false positives after individual review.", + "function": "development", + "stride": "tampering", + "cwe": 90 + }, + "missing-authentication": { + "id": "missing-authentication", + "title": "Missing Authentication", + "description": "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes or stores sensitive data. 
", + "impact": "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.", + "asvs": "V2 - Authentication Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", + "action": "Authentication of Incoming Requests", + "mitigation": "Apply an authentication method to the technical asset. To protect highly sensitive data consider the use of two-factor authentication for human users.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets (except load-balancer, reverse-proxy, service-registry, waf, ids, and ips and in-process calls) should authenticate incoming requests when the asset processes or stores sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).", + "risk_assessment": "The risk rating (medium or high) depends on the sensitivity of the data sent across the communication link. 
Monitoring callers are exempted from this risk.", + "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.", + "function": "architecture", + "stride": "elevation-of-privilege", + "cwe": 306 + }, + "missing-authentication-second-factor": { + "id": "missing-authentication-second-factor", + "title": "Missing Two-Factor Authentication (2FA)", + "description": "Technical assets (especially multi-tenant systems) should authenticate incoming requests with two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.", + "impact": "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.", + "asvs": "V2 - Authentication Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html", + "action": "Authentication with Second Factor (2FA)", + "mitigation": "Apply an authentication method to the technical asset protecting highly sensitive data via two-factor authentication for human users.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets (except load-balancer, reverse-proxy, waf, ids, and ips) should authenticate incoming requests via two-factor authentication (2FA) when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.", + "risk_assessment": "medium", + "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.", + "stride": "elevation-of-privilege", 
+ "cwe": 308 + }, + "missing-build-infrastructure": { + "id": "missing-build-infrastructure", + "title": "Missing Build Infrastructure", + "description": "The modeled architecture does not contain a build infrastructure (devops-client, sourcecode-repo, build-pipeline, etc.), which might be the risk of a model missing critical assets (and thus not seeing their risks). If the architecture contains custom-developed parts, the pipeline where code gets developed and built needs to be part of the model.", + "impact": "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model due to critical build infrastructure components missing in the model.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Build Pipeline Hardening", + "mitigation": "Include the build infrastructure in the model.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Models with in-scope custom-developed parts missing in-scope development (code creation) and build infrastructure components (devops-client, sourcecode-repo, build-pipeline, etc.).", + "risk_assessment": "The risk rating depends on the highest sensitivity of the in-scope assets running custom-developed parts.", + "false_positives": "Models not having any custom-developed parts can be considered as false positives after individual review.", + "function": "architecture", + "stride": "tampering", + "model_failure_possible_reason": true, + "cwe": 1127 + }, + "missing-cloud-hardening": { + "id": "missing-cloud-hardening", + "title": "Missing Cloud Hardening", + "description": "Cloud components should be hardened according to the cloud vendor best practices. 
This affects their configuration, auditing, and further areas.", + "impact": "If this risk is unmitigated, attackers might access cloud components in an unintended way.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Cloud Hardening", + "mitigation": "Apply hardening of all cloud components and services, taking special care to follow the individual risk descriptions (which depend on the cloud provider tags in the model). \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eAmazon Web Services (AWS)\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Amazon Web Services\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"PacBot\", \"CloudSploit\", \"CloudMapper\", \"ScoutSuite\", or \"Prowler AWS CIS Benchmark Tool\"\u003c/i\u003e). \u003cbr\u003eFor EC2 and other servers running Amazon Linux, follow the \u003ci\u003eCIS Benchmark for Amazon Linux\u003c/i\u003e and switch to IMDSv2. \u003cbr\u003eFor S3 buckets follow the \u003ci\u003eSecurity Best Practices for Amazon S3\u003c/i\u003e at \u003ca href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html\"\u003ehttps://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html\u003c/a\u003e to avoid accidental leakage. 
\u003cbr\u003eAlso take a look at some of these tools: \u003ca href=\"https://github.com/toniblyx/my-arsenal-of-aws-security-tools\"\u003ehttps://github.com/toniblyx/my-arsenal-of-aws-security-tools\u003c/a\u003e \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eMicrosoft Azure\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Microsoft Azure\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\" or \"ScoutSuite\"\u003c/i\u003e).\u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eGoogle Cloud Platform\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Google Cloud Computing Platform\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\" or \"ScoutSuite\"\u003c/i\u003e). \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eOracle Cloud Platform\u003c/b\u003e: Follow the hardening best practices (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\"\u003c/i\u003e).", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope cloud components (either residing in cloud trust boundaries or more specifically tagged with cloud provider types).", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Cloud components not running parts of the target architecture can be considered as false positives after individual review.", + "function": "operations", + "stride": "tampering", + "cwe": 1008 + }, + "missing-file-validation": { + "id": "missing-file-validation", + "title": "Missing File Validation", + "description": "When a technical asset accepts files, these input files should be strictly validated about filename and type.", + "impact": "If this risk is unmitigated, attackers might be able to provide malicious files to the application.", + "asvs": "V12 - File and Resources Verification 
Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/File_Upload_Cheat_Sheet.html", + "action": "File Validation", + "mitigation": "Filter by file extension and discard (if feasible) the name provided. Whitelist the accepted file types and determine the mime-type on the server-side (for example via \"Apache Tika\" or similar checks). If the file is retrievable by end users and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they were uploaded, also apply a fresh malware scan during retrieval to scan with newer signatures of popular malware). Also enforce limits on maximum file size to avoid denial-of-service like scenarios.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets with custom-developed code accepting file data formats.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Fully trusted (i.e. 
cryptographically signed or similar) files can be considered as false positives after individual review.", + "function": "development", + "cwe": 434 + }, + "missing-hardening": { + "id": "missing-hardening", + "title": "Missing Hardening", + "description": "Technical assets with a Relative Attacker Attractiveness (RAA) value of 55 % or higher should be explicitly hardened taking best practices and vendor hardening guides into account.", + "impact": "If this risk remains unmitigated, attackers might be able to more easily attack high-value targets.", + "asvs": "V14 - Configuration Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "System Hardening", + "mitigation": "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor recommendations, DevSec Hardening Framework, DBSAT for Oracle databases, and others).", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets with RAA values of 55 % or higher. Generally for high-value targets like data stores, application servers, identity providers and ERP systems this limit is reduced to 40 %", + "risk_assessment": "The risk rating depends on the sensitivity of the data processed or stored in the technical asset.", + "false_positives": "Usually no false positives.", + "function": "operations", + "stride": "tampering", + "cwe": 16 + }, + "missing-identity-propagation": { + "id": "missing-identity-propagation", + "title": "Missing Identity Propagation", + "description": "Technical assets (especially multi-tenant systems), which usually process data for end users should authorize every request based on the identity of the end user when the data flow is authenticated (i.e. non-public). 
For DevOps usages at least a technical-user authorization is required.", + "impact": "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within the system due to missing resource-based authorization checks.", + "asvs": "V4 - Access Control Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", + "action": "Identity Propagation and Resource-based Authorization", + "mitigation": "When processing requests for end users if possible authorize in the backend against the propagated identity of the end user. This can be achieved in passing JWTs or similar tokens and checking them in the backend services. For DevOps usages apply at least a technical-user authorization.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope service-like technical assets which usually process data based on end user requests, if authenticated (i.e. non-public), should authorize incoming requests based on the propagated end user identity when their rating is sensitive. This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). 
DevOps usages are exempted from this risk.", + "risk_assessment": "The risk rating (medium or high) depends on the confidentiality, integrity, and availability rating of the technical asset.", + "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.", + "function": "architecture", + "stride": "elevation-of-privilege", + "cwe": 284 + }, + "missing-identity-provider-isolation": { + "id": "missing-identity-provider-isolation", + "title": "Missing Identity Provider Isolation", + "description": "Highly sensitive identity provider assets and their identity data stores should be isolated from other assets by their own network segmentation trust-boundary (execution-environment boundaries do not count as network isolation).", + "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards highly sensitive identity provider assets and their identity data stores, as they are not separated by network segmentation.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Network Segmentation", + "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity data stores.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope identity provider assets and their identity data stores when surrounded by other (not identity-related) assets (without a network trust-boundary in-between). This risk is especially prevalent when other non-identity related assets are within the same execution environment (i.e. same database or same application server).", + "risk_assessment": "Default is high impact. 
The impact is increased to very-high when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.", + "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were identity providers with data of highest sensitivity.", + "function": "operations", + "stride": "elevation-of-privilege", + "cwe": 1008 + }, + "missing-identity-store": { + "id": "missing-identity-store", + "title": "Missing Identity Store", + "description": "The modeled architecture does not contain an identity store, which might be the risk of a model missing critical assets (and thus not seeing their risks).", + "impact": "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store that is currently missing in the model.", + "asvs": "V2 - Authentication Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html", + "action": "Identity Store", + "mitigation": "Include an identity store in the model if the application has a login.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Models with authenticated data-flows authorized via end user identity missing an in-scope identity store.", + "risk_assessment": "The risk rating depends on the sensitivity of the end user-identity authorized technical assets and their data assets processed and stored.", + "false_positives": "Models only offering data/services without any real authentication need can be considered as false positives after individual review.", + "function": "architecture", + "model_failure_possible_reason": true, + "cwe": 287 + }, + "missing-network-segmentation": { + "id": "missing-network-segmentation", + "title": "Missing Network Segmentation", + "description": "Highly sensitive assets and/or data stores residing 
in the same network segment as other less sensitive assets (like webservers or content management systems etc.) should be better protected by a network segmentation trust-boundary.", + "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards more valuable targets, as they are not separated by network segmentation.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Network Segmentation", + "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive assets and/or data stores.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets with high sensitivity and RAA values as well as data stores when surrounded by assets (without a network trust-boundary in-between) which are of type client-system, web-server, web-application, cms, web-service-rest, web-service-soap, build-pipeline, sourcecode-repository, monitoring, or similar and there is no direct connection between these (hence no requirement to be so close to each other).", + "risk_assessment": "Default is low risk. 
The risk is increased to medium when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.", + "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were containing/processing highly sensitive data.", + "function": "operations", + "stride": "elevation-of-privilege", + "cwe": 1008 + }, + "missing-vault": { + "id": "missing-vault", + "title": "Missing Vault (Secret Storage)", + "description": "In order to avoid the risk of secret leakage via config files (when attacked through vulnerabilities being able to read files like Path-Traversal and others), it is best practice to use a separate hardened process with proper authentication, authorization, and audit logging to access config secrets (like credentials, private keys, client certificates, etc.). This component is usually some kind of Vault.", + "impact": "If this risk is unmitigated, attackers might be able to more easily steal config secrets (like credentials, private keys, client certificates, etc.) 
once a vulnerability to access files is present and exploited.", + "asvs": "V6 - Stored Cryptography Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html", + "action": "Vault (Secret Storage)", + "mitigation": "Consider using a Vault (Secret Storage) to securely store and access config secrets (like credentials, private keys, client certificates, etc.).", + "check": "Is a Vault (Secret Storage) in place?", + "detection_logic": "Models without a Vault (Secret Storage).", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Models where no technical assets have any kind of sensitive config data to protect can be considered as false positives after individual review.", + "function": "architecture", + "stride": "information-disclosure", + "model_failure_possible_reason": true, + "cwe": 522 + }, + "missing-vault-isolation": { + "id": "missing-vault-isolation", + "title": "Missing Vault Isolation", + "description": "Highly sensitive vault assets and their data stores should be isolated from other assets by their own network segmentation trust-boundary (execution-environment boundaries do not count as network isolation).", + "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards highly sensitive vault assets and their data stores, as they are not separated by network segmentation.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Network Segmentation", + "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their data stores.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter 
applied?", + "detection_logic": "In-scope vault assets when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).", + "risk_assessment": "Default is medium impact. The impact is increased to high when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.", + "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were vaults with data of highest sensitivity.", + "function": "operations", + "stride": "elevation-of-privilege", + "cwe": 1008 + }, + "missing-waf": { + "id": "missing-waf", + "title": "Missing Web Application Firewall (WAF)", + "description": "To have a first line of filtering defense, security architectures with web-services or web-applications should include a WAF in front of them. Even though a WAF is not a replacement for security (all components must be secure even without a WAF) it adds another layer of defense to the overall system by delaying some attacks and having easier attack alerting through it.", + "impact": "If this risk is unmitigated, attackers might be able to apply standard attack pattern tests at great speed without any filtering.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Virtual_Patching_Cheat_Sheet.html", + "action": "Web Application Firewall (WAF)", + "mitigation": "Consider placing a Web Application Firewall (WAF) in front of the web-services and/or web-applications. For cloud environments many cloud providers offer pre-configured WAFs. 
Even reverse proxies can be enhanced by a WAF component via ModSecurity plugins.", + "check": "Is a Web Application Firewall (WAF) in place?", + "detection_logic": "In-scope web-services and/or web-applications accessed across a network trust boundary not having a Web Application Firewall (WAF) in front of them.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Targets only accessible via WAFs or reverse proxies containing a WAF component (like ModSecurity) can be considered as false positives after individual review.", + "function": "operations", + "stride": "tampering", + "cwe": 1008 + }, + "mixed-targets-on-shared-runtime": { + "id": "mixed-targets-on-shared-runtime", + "title": "Mixed Targets on Shared Runtime", + "description": "Different attacker targets (like frontend and backend/datastore components) should not be running on the same shared (underlying) runtime.", + "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards more valuable targets, as they are running on the same shared runtime.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Runtime Separation", + "mitigation": "Use separate runtime environments for running different target components or apply similar separation styles to prevent load- or breach-related problems originating from one more attacker-facing asset impacts also the other more critical rated backend/datastore assets.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Shared runtime running technical assets of different trust-boundaries is at risk. 
Also mixing backend/datastore with frontend components on the same shared runtime is considered a risk.", + "risk_assessment": "The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of the technical asset running on the shared runtime.", + "false_positives": "When all assets running on the shared runtime are hardened and protected to the same extent as if all were containing/processing highly sensitive data.", + "function": "operations", + "stride": "elevation-of-privilege", + "cwe": 1008 + }, + "path-traversal": { + "id": "path-traversal", + "title": "Path-Traversal", + "description": "When a filesystem is accessed Path-Traversal or Local-File-Inclusion (LFI) risks might arise. The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.", + "impact": "If this risk is unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, business data files, etc.) from the filesystem of affected components.", + "asvs": "V12 - File and Resources Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Input_Validation_Cheat_Sheet.html", + "action": "Path-Traversal Prevention", + "mitigation": "Before accessing the file cross-check that it resides in the expected folder and is of the expected type and filename/suffix. Try to use a mapping if possible instead of directly accessing by a filename which is (partly or fully) provided by the caller. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Filesystems accessed by in-scope callers.", + "risk_assessment": "The risk rating depends on the sensitivity of the data stored inside the technical asset.", + "false_positives": "File accesses by filenames not consisting of parts controllable by the caller can be considered as false positives after individual review.", + "function": "development", + "stride": "information-disclosure", + "cwe": 22 + }, + "push-instead-of-pull-deployment": { + "id": "push-instead-of-pull-deployment", + "title": "Push instead of Pull Deployment", + "description": "When comparing push-based vs. pull-based deployments from a security perspective, pull-based deployments improve the overall security of the deployment targets. Every exposed interface of a production system to accept a deployment increases the attack surface of the production system, thus a pull-based approach exposes less attack surface relevant interfaces.", + "impact": "If this risk is unmitigated, attackers might have more potential target vectors for attacks, as the overall attack surface is unnecessarily increased.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Build Pipeline Hardening", + "mitigation": "Try to prefer pull-based deployments (like GitOps scenarios offer) over push-based deployments to reduce the attack surface of the production system.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Models with build pipeline components accessing in-scope targets of deployment (in a non-readonly way) which are not 
build-related components themselves.", + "risk_assessment": "The risk rating depends on the highest sensitivity of the deployment targets running custom-developed parts.", + "false_positives": "Communication links that are not deployment paths can be considered as false positives after individual review.", + "function": "architecture", + "stride": "tampering", + "model_failure_possible_reason": true, + "cwe": 1127 + }, + "search-query-injection": { + "id": "search-query-injection", + "title": "Search-Query Injection", + "description": "When a search engine server is accessed Search-Query Injection risks might arise.\u003cbr\u003e\u003cbr\u003eSee for example \u003ca href=\"https://github.com/veracode-research/solr-injection\"\u003ehttps://github.com/veracode-research/solr-injection\u003c/a\u003e and \u003ca href=\"https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\"\u003ehttps://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\u003c/a\u003e for more details (here related to Solr, but in general showcasing the topic of search query injections).", + "impact": "If this risk remains unmitigated, attackers might be able to read more data from the search index and eventually further escalate towards a deeper system penetration via code executions.", + "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_Cheat_Sheet.html", + "action": "Search-Query Injection Prevention", + "mitigation": "Try to use libraries that properly encode search query meta characters in searches and don't expose the query unfiltered to the caller. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope clients accessing search engine servers via typical search access protocols.", + "risk_assessment": "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed or stored.", + "false_positives": "Server engine queries by search values not consisting of parts controllable by the caller can be considered as false positives after individual review.", + "function": "development", + "stride": "tampering", + "cwe": 74 + }, + "server-side-request-forgery": { + "id": "server-side-request-forgery", + "title": "Server-Side Request Forgery (SSRF)", + "description": "When a server system (i.e. not a client) is accessing other server systems via typical web protocols Server-Side Request Forgery (SSRF) or Local-File-Inclusion (LFI) or Remote-File-Inclusion (RFI) risks might arise. ", + "impact": "If this risk is unmitigated, attackers might be able to access sensitive services or files of network-reachable components by modifying outgoing calls of affected components.", + "asvs": "V12 - File and Resources Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html", + "action": "SSRF Prevention", + "mitigation": "Try to avoid constructing the outgoing target URL with caller controllable values. Alternatively use a mapping (whitelist) when accessing outgoing URLs instead of creating them including caller controllable values. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope non-client systems accessing (using outgoing communication links) targets with either HTTP or HTTPS protocol.", + "risk_assessment": "The risk rating (low or medium) depends on the sensitivity of the data assets receivable via web protocols from targets within the same network trust-boundary as well on the sensitivity of the data assets receivable via web protocols from the target asset itself. Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF.", + "false_positives": "Servers not sending outgoing web requests can be considered as false positives after review.", + "function": "development", + "stride": "information-disclosure", + "cwe": 918 + }, + "service-registry-poisoning": { + "id": "service-registry-poisoning", + "title": "Service Registry Poisoning", + "description": "When a service registry used for discovery of trusted service endpoints Service Registry Poisoning risks might arise.", + "impact": "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or malicious lookup and config data leading to breach of sensitive data.", + "asvs": "V10 - Malicious Code Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html", + "action": "Service Registry Integrity Check", + "mitigation": "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope service registries.", + 
"risk_assessment": "The risk rating depends on the sensitivity of the technical assets accessing the service registry as well as the data assets processed or stored.", + "false_positives": "Service registries not used for service discovery can be considered as false positives after individual review.", + "function": "architecture", + "cwe": 693 + }, + "sql-nosql-injection": { + "id": "sql-nosql-injection", + "title": "SQL/NoSQL-Injection", + "description": "When a database is accessed via database access protocols SQL/NoSQL-Injection risks might arise. The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.", + "impact": "If this risk is unmitigated, attackers might be able to modify SQL/NoSQL queries to steal and modify data and eventually further escalate towards a deeper system penetration via code executions.", + "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html", + "action": "SQL/NoSQL-Injection Prevention", + "mitigation": "Try to use parameter binding to be safe from injection vulnerabilities. 
When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Database accessed via typical database access protocols by in-scope clients.", + "risk_assessment": "The risk rating depends on the sensitivity of the data stored inside the database.", + "false_positives": "Database accesses by queries not consisting of parts controllable by the caller can be considered as false positives after individual review.", + "function": "development", + "stride": "tampering", + "cwe": 89 + }, + "unchecked-deployment": { + "id": "unchecked-deployment", + "title": "Unchecked Deployment", + "description": "For each build-pipeline component Unchecked Deployment risks might arise when the build-pipeline does not include established DevSecOps best-practices. DevSecOps best-practices scan as part of CI/CD pipelines for vulnerabilities in source- or byte-code, dependencies, container layers, and dynamically against running test systems. 
There are several open-source and commercial tools existing in the categories DAST, SAST, and IAST.", + "impact": "If this risk remains unmitigated, vulnerabilities in custom-developed software or their dependencies might not be identified during continuous deployment cycles.", + "asvs": "V14 - Configuration Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html", + "action": "Build Pipeline Hardening", + "mitigation": "Apply DevSecOps best-practices and use scanning tools to identify vulnerabilities in source- or byte-code, dependencies, container layers, and optionally also via dynamic scans against running test systems.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "All development-relevant technical assets.", + "risk_assessment": "The risk rating depends on the highest rating of the technical assets and data assets processed by deployment-receiving targets.", + "false_positives": "When the build-pipeline does not build any software components it can be considered a false positive after individual review.", + "function": "architecture", + "stride": "tampering", + "cwe": 1127 + }, + "unencrypted-asset": { + "id": "unencrypted-asset", + "title": "Unencrypted Technical Assets", + "description": "Due to the confidentiality rating of the technical asset itself and/or the processed data assets this technical asset must be encrypted. 
The risk rating depends on the sensitivity of the technical asset itself and of the data assets stored.", + "impact": "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.", + "asvs": "V6 - Stored Cryptography Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html", + "action": "Encryption of Technical Asset", + "mitigation": "Apply encryption to the technical asset.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope unencrypted technical assets (excluding reverse-proxy, load-balancer, waf, ids, ips and embedded components like library) storing data assets rated at least as confidential or critical. For technical assets storing data assets rated as strictly-confidential or mission-critical the encryption must be of type data-with-enduser-individual-key.", + "risk_assessment": "Depending on the confidentiality rating of the stored data-assets either medium or high risk.", + "false_positives": "When all sensitive data stored within the asset is already fully encrypted on document or data level.", + "function": "operations", + "stride": "information-disclosure", + "cwe": 311 + }, + "unencrypted-communication": { + "id": "unencrypted-communication", + "title": "Unencrypted Communication", + "description": "Due to the confidentiality and/or integrity rating of the data assets transferred over the communication link this connection must be encrypted.", + "impact": "If this risk is unmitigated, network attackers might be able to eavesdrop on unencrypted sensitive data sent between components.", + "asvs": "V9 - Communication Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html", + "action": "Encryption of Communication Links", + "mitigation": "Apply transport 
layer encryption to the communication link.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Unencrypted technical communication links of in-scope technical assets (excluding monitoring traffic as well as local-file-access and in-process-library-call) transferring sensitive data.", + "risk_assessment": "Depending on the confidentiality rating of the transferred data-assets either medium or high risk.", + "false_positives": "When all sensitive data sent over the communication link is already fully encrypted on document or data level. Also intra-container/pod communication can be considered false positive when container orchestration platform handles encryption.", + "function": "operations", + "stride": "information-disclosure", + "cwe": 319 + }, + "unguarded-access-from-internet": { + "id": "unguarded-access-from-internet", + "title": "Unguarded Access From Internet", + "description": "Internet-exposed assets must be guarded by a protecting service, application, or reverse-proxy.", + "impact": "If this risk is unmitigated, attackers might be able to directly attack sensitive systems without any hardening components in-between due to them being directly exposed on the internet.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Encapsulation of Technical Asset", + "mitigation": "Encapsulate the asset behind a guarding service, application, or reverse-proxy. For admin maintenance a bastion-host should be used as a jump-server. 
For file transfer a store-and-forward-host should be used as an indirect file exchange platform.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets (excluding load-balancer) with confidentiality rating of confidential (or higher) or with integrity rating of critical (or higher) when accessed directly from the internet. All web-server, web-application, reverse-proxy, waf, and gateway assets are exempted from this risk when they do not consist of custom developed code and the data-flow only consists of HTTP or FTP protocols. Access from monitoring systems as well as VPN-protected connections are exempted.", + "risk_assessment": "The matching technical assets are at low risk. When either the confidentiality rating is strictly-confidential or the integrity rating is mission-critical, the risk-rating is considered medium. For assets with RAA values higher than 40 % the risk-rating increases.", + "false_positives": "When other means of filtering client requests are applied equivalent of reverse-proxy, waf, or gateway components.", + "function": "architecture", + "stride": "elevation-of-privilege", + "cwe": 501 + }, + "unguarded-direct-datastore-access": { + "id": "unguarded-direct-datastore-access", + "title": "Unguarded Direct Datastore Access", + "description": "Data stores accessed across trust boundaries must be guarded by some protecting service or application.", + "impact": "If this risk is unmitigated, attackers might be able to directly attack sensitive data stores without any protecting components in-between.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Encapsulation of Datastore", + "mitigation": "Encapsulate the datastore access behind a guarding service or application.", + "check": "Are recommendations from the 
linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets of type datastore (except identity-store-ldap when accessed from identity-provider and file-server when accessed via file transfer protocols) with confidentiality rating of confidential (or higher) or with integrity rating of critical (or higher) which have incoming data-flows from assets outside across a network trust-boundary. DevOps config and deployment access is excluded from this risk.", + "risk_assessment": "The matching technical assets are at low risk. When either the confidentiality rating is strictly-confidential or the integrity rating is mission-critical, the risk-rating is considered medium. For assets with RAA values higher than 40 % the risk-rating increases.", + "false_positives": "When the caller is considered fully trusted as if it was part of the datastore itself.", + "function": "architecture", + "stride": "elevation-of-privilege", + "cwe": 501 + }, + "unnecessary-communication-link": { + "id": "unnecessary-communication-link", + "title": "Unnecessary Communication Link", + "description": "When a technical communication link does not send or receive any data assets, this is an indicator for an unnecessary communication link (or for an incomplete model).", + "impact": "If this risk is unmitigated, attackers might be able to target unnecessary communication links.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Attack Surface Reduction", + "mitigation": "Try to avoid using technical communication links that do not send or receive anything.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets' technical communication links not sending or receiving any data assets.", + "risk_assessment": "low", + 
"false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "elevation-of-privilege", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "unnecessary-data-asset": { + "id": "unnecessary-data-asset", + "title": "Unnecessary Data Asset", + "description": "When a data asset is not processed or stored by any data assets and also not transferred by any communication links, this is an indicator for an unnecessary data asset (or for an incomplete model).", + "impact": "If this risk is unmitigated, attackers might be able to access unnecessary data assets using other vulnerabilities.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Attack Surface Reduction", + "mitigation": "Try to avoid having data assets that are not required/used.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Modelled data assets not processed or stored by any data assets and also not transferred by any communication links.", + "risk_assessment": "low", + "false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "elevation-of-privilege", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "unnecessary-data-transfer": { + "id": "unnecessary-data-transfer", + "title": "Unnecessary Data Transfer", + "description": "When a technical asset sends or receives data assets, which it neither processes or stores this is an indicator for unnecessarily transferred data (or for an incomplete model). 
When the unnecessarily transferred data assets are sensitive, this poses an unnecessary risk of an increased attack surface.", + "impact": "If this risk is unmitigated, attackers might be able to target unnecessarily transferred data.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Attack Surface Reduction", + "mitigation": "Try to avoid sending or receiving sensitive data assets which are not required (i.e. neither processed or stored) by the involved technical asset.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets sending or receiving sensitive data assets which are neither processed nor stored by the technical asset are flagged with this risk. The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of the technical asset. Monitoring data is exempted from this risk.", + "risk_assessment": "The risk assessment is depending on the confidentiality and integrity rating of the transferred data asset either low or medium.", + "false_positives": "Technical assets missing the model entries of either processing or storing the mentioned data assets can be considered as false positives (incomplete models) after individual review. These should then be addressed by completing the model so that all necessary data assets are processed and/or stored by the technical asset involved.", + "function": "architecture", + "stride": "elevation-of-privilege", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "unnecessary-technical-asset": { + "id": "unnecessary-technical-asset", + "title": "Unnecessary Technical Asset", + "description": "When a technical asset does not process or store any data assets, this is an indicator for an unnecessary technical asset (or for an incomplete model). 
This is also the case if the asset has no communication links (either outgoing or incoming).", + "impact": "If this risk is unmitigated, attackers might be able to target unnecessary technical assets.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html", + "action": "Attack Surface Reduction", + "mitigation": "Try to avoid using technical assets that do not process or store anything.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Technical assets not processing or storing any data assets.", + "risk_assessment": "low", + "false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "elevation-of-privilege", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "untrusted-deserialization": { + "id": "untrusted-deserialization", + "title": "Untrusted Deserialization", + "description": "When a technical asset accepts data in a specific serialized form (like Java or .NET serialization), Untrusted Deserialization risks might arise.\u003cbr\u003e\u003cbr\u003eSee \u003ca href=\"https://christian-schneider.net/JavaDeserializationSecurityFAQ.html\"\u003ehttps://christian-schneider.net/JavaDeserializationSecurityFAQ.html\u003c/a\u003e for more details.", + "impact": "If this risk is unmitigated, attackers might be able to execute code on target systems by exploiting untrusted deserialization endpoints.", + "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html", + "action": "Prevention of Deserialization of Untrusted Data", + "mitigation": "Try to avoid the deserialization of untrusted data (even of data within the same trust-boundary as long as it is sent across a remote 
connection) in order to stay safe from Untrusted Deserialization vulnerabilities. Alternatively a strict whitelisting approach of the classes/types/values to deserialize might help as well. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets accepting serialization data formats (including EJB and RMI protocols).", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.", + "false_positives": "Fully trusted (i.e. cryptographically signed or similar) data deserialized can be considered as false positives after individual review.", + "function": "architecture", + "stride": "tampering", + "cwe": 502 + }, + "wrong-communication-link-content": { + "id": "wrong-communication-link-content", + "title": "Wrong Communication Link Content", + "description": "When a communication link is defined as readonly, but does not receive any data asset, or when it is defined as not readonly, but does not send any data asset, it is likely to be a model failure.", + "impact": "If this potential model error is not fixed, some risks might not be visible.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html", + "action": "Model Consistency", + "mitigation": "Try to model the correct readonly flag and/or data sent/received of communication links. 
Also try to use communication link types matching the target technology/machine types.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Communication links with inconsistent data assets being sent/received not matching their readonly flag or otherwise inconsistent protocols not matching the target technology type.", + "risk_assessment": "low", + "false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "information-disclosure", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "wrong-trust-boundary-content": { + "id": "wrong-trust-boundary-content", + "title": "Wrong Trust Boundary Content", + "description": "When a trust boundary of type network-policy-namespace-isolation contains non-container assets it is likely to be a model failure.", + "impact": "If this potential model error is not fixed, some risks might not be visible.", + "asvs": "V1 - Architecture, Design and Threat Modeling Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html", + "action": "Model Consistency", + "mitigation": "Try to model the correct types of trust boundaries and data assets.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "Trust boundaries which should only contain containers, but have different assets inside.", + "risk_assessment": "low", + "false_positives": "Usually no false positives as this looks like an incomplete model.", + "function": "architecture", + "stride": "elevation-of-privilege", + "model_failure_possible_reason": true, + "cwe": 1008 + }, + "xml-external-entity": { + "id": "xml-external-entity", + "title": "XML External Entity (XXE)", + "description": "When a technical asset accepts data in XML format, XML External Entity (XXE) risks might arise.", + "impact": "If this risk is 
unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, business data files, etc.) from the filesystem of affected components and/or access sensitive services or files of other components.", + "asvs": "V14 - Configuration Verification Requirements", + "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html", + "action": "XML Parser Hardening", + "mitigation": "Apply hardening of all XML parser instances in order to stay safe from XML External Entity (XXE) vulnerabilities. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.", + "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?", + "detection_logic": "In-scope technical assets accepting XML data formats.", + "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored. Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF (and XXE vulnerabilities are often also SSRF vulnerabilities).", + "false_positives": "Fully trusted (i.e. 
cryptographically signed or similar) XML data can be considered as false positives after individual review.", + "function": "development", + "stride": "information-disclosure", + "cwe": 611 + } + }, + "risk_tracking": { + "dos-risky-access-across-trust-boundary@*@*@*": { + "synthetic_risk_id": "dos-risky-access-across-trust-boundary@*@*@*", + "justification": "The hardening measures are being implemented and checked", + "ticket": "XYZ-1234", + "checked_by": "John Doe", + "status": "in-progress", + "date": "2020-01-04T00:00:00Z" + }, + "ldap-injection@*@ldap-auth-server@*": { + "synthetic_risk_id": "ldap-injection@*@ldap-auth-server@*", + "justification": "The hardening measures were implemented and checked", + "ticket": "XYZ-5678", + "checked_by": "John Doe", + "status": "mitigated", + "date": "2020-01-05T00:00:00Z" + }, + "missing-authentication-second-factor@*@*@*": { + "synthetic_risk_id": "missing-authentication-second-factor@*@*@*", + "justification": "The hardening measures were implemented and checked", + "ticket": "XYZ-1234", + "checked_by": "John Doe", + "status": "mitigated", + "date": "2020-01-04T00:00:00Z" + }, + "missing-hardening@*": { + "synthetic_risk_id": "missing-hardening@*", + "justification": "The hardening measures were implemented and checked", + "ticket": "XYZ-1234", + "checked_by": "John Doe", + "status": "mitigated", + "date": "2020-01-04T00:00:00Z" + }, + "unencrypted-asset@*": { + "synthetic_risk_id": "unencrypted-asset@*", + "justification": "The hardening measures were implemented and checked", + "ticket": "XYZ-1234", + "checked_by": "John Doe", + "status": "mitigated", + "date": "2020-01-04T00:00:00Z" + }, + "untrusted-deserialization@erp-system": { + "synthetic_risk_id": "untrusted-deserialization@erp-system", + "justification": "Risk accepted as tolerable", + "ticket": "XYZ-1234", + "checked_by": "John Doe", + "status": "accepted", + "date": "2020-01-04T00:00:00Z" + } + }, + "communication_links": { + 
"apache-webserver\u003eauth-credential-check-traffic": { + "id": "apache-webserver\u003eauth-credential-check-traffic", + "source_id": "apache-webserver", + "target_id": "identity-provider", + "title": "Auth Credential Check Traffic", + "description": "Link to the identity provider server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "apache-webserver\u003eerp-system-traffic": { + "id": "apache-webserver\u003eerp-system-traffic", + "source_id": "apache-webserver", + "target_id": "erp-system", + "title": "ERP System Traffic", + "description": "Link to the ERP system", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "backend-admin-client\u003edb-update-access": { + "id": "backend-admin-client\u003edb-update-access", + "source_id": "backend-admin-client", + "target_id": "sql-database", + "title": "DB Update Access", + "description": "Link to the database (JDBC tunneled via SSH)", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "db-dumps" + ], + "data_assets_received": [ + "db-dumps", + "erp-logs", + "customer-accounts", + "customer-operational-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "backend-admin-client\u003eerp-web-access": { + "id": "backend-admin-client\u003eerp-web-access", + "source_id": "backend-admin-client", + "target_id": "erp-system", + "title": "ERP Web Access", + "description": "Link to the ERP 
system (Web)", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "erp-customizing" + ], + "data_assets_received": [ + "erp-logs" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "backend-admin-client\u003euser-management-access": { + "id": "backend-admin-client\u003euser-management-access", + "source_id": "backend-admin-client", + "target_id": "ldap-auth-server", + "title": "User Management Access", + "description": "Link to the LDAP auth server for managing users", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "backoffice-client\u003eerp-internal-access": { + "id": "backoffice-client\u003eerp-internal-access", + "source_id": "backoffice-client", + "target_id": "erp-system", + "title": "ERP Internal Access", + "description": "Link to the ERP system", + "protocol": "https", + "tags": [ + "some-erp" + ], + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "internal-business-data" + ], + "data_assets_received": [ + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "backoffice-client\u003emarketing-cms-editing": { + "id": "backoffice-client\u003emarketing-cms-editing", + "source_id": "backoffice-client", + "target_id": "marketing-cms", + "title": "Marketing CMS Editing", + "description": "Link to the CMS for editing content", + "protocol": "https", + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "marketing-material" + ], + "data_assets_received": [ + "marketing-material" + ], + "diagram_tweak_weight": 
1, + "diagram_tweak_constraint": true + }, + "customer-client\u003ecustomer-traffic": { + "id": "customer-client\u003ecustomer-traffic", + "source_id": "customer-client", + "target_id": "load-balancer", + "title": "Customer Traffic", + "description": "Link to the load balancer", + "protocol": "https", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code", + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "erp-system\u003edatabase-traffic": { + "id": "erp-system\u003edatabase-traffic", + "source_id": "erp-system", + "target_id": "sql-database", + "title": "Database Traffic", + "description": "Link to the DB system", + "protocol": "jdbc", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "erp-system\u003enfs-filesystem-access": { + "id": "erp-system\u003enfs-filesystem-access", + "source_id": "erp-system", + "target_id": "contract-fileserver", + "title": "NFS Filesystem Access", + "description": "Link to the file system", + "protocol": "nfs", + "data_assets_sent": [ + "customer-contracts" + ], + "data_assets_received": [ + "customer-contracts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "external-dev-client\u003egit-repo-code-write-access": { + "id": "external-dev-client\u003egit-repo-code-write-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Code Write Access", + "description": "Link to 
the Git repo", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "external-dev-client\u003egit-repo-web-ui-access": { + "id": "external-dev-client\u003egit-repo-web-ui-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Web-UI Access", + "description": "Link to the Git repo", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "external-dev-client\u003ejenkins-web-ui-access": { + "id": "external-dev-client\u003ejenkins-web-ui-access", + "source_id": "external-dev-client", + "target_id": "jenkins-buildserver", + "title": "Jenkins Web-UI Access", + "description": "Link to the Jenkins build server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "build-job-config" + ], + "data_assets_received": [ + "build-job-config" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "identity-provider\u003eldap-credential-check-traffic": { + "id": "identity-provider\u003eldap-credential-check-traffic", + "source_id": "identity-provider", + "target_id": "ldap-auth-server", + "title": "LDAP Credential Check Traffic", + "description": "Link to the LDAP server", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + 
"diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "jenkins-buildserver\u003eapplication-deployment": { + "id": "jenkins-buildserver\u003eapplication-deployment", + "source_id": "jenkins-buildserver", + "target_id": "apache-webserver", + "title": "Application Deployment", + "description": "Link to the Apache webserver", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "jenkins-buildserver\u003ecms-updates": { + "id": "jenkins-buildserver\u003ecms-updates", + "source_id": "jenkins-buildserver", + "target_id": "marketing-cms", + "title": "CMS Updates", + "description": "Link to the CMS", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "jenkins-buildserver\u003egit-repo-code-read-access": { + "id": "jenkins-buildserver\u003egit-repo-code-read-access", + "source_id": "jenkins-buildserver", + "target_id": "git-repo", + "title": "Git Repo Code Read Access", + "description": "Link to the Git repository server", + "protocol": "ssh", + "readonly": true, + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "load-balancer\u003ecms-content-traffic": { + "id": "load-balancer\u003ecms-content-traffic", + "source_id": "load-balancer", + "target_id": "marketing-cms", + "title": "CMS Content Traffic", + "description": "Link to the CMS server", + "protocol": "http", + "readonly": true, + "data_assets_received": [ + "marketing-material" + ], + 
"diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "load-balancer\u003eweb-application-traffic": { + "id": "load-balancer\u003eweb-application-traffic", + "source_id": "load-balancer", + "target_id": "apache-webserver", + "title": "Web Application Traffic", + "description": "Link to the web server", + "protocol": "http", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + "marketing-cms\u003eauth-traffic": { + "id": "marketing-cms\u003eauth-traffic", + "source_id": "marketing-cms", + "target_id": "ldap-auth-server", + "title": "Auth Traffic", + "description": "Link to the LDAP auth server", + "protocol": "ldap", + "readonly": true, + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + }, + "diagram_tweak_nodesep": 2, + "diagram_tweak_ranksep": 2, + "incoming_technical_communication_links_mapped_by_target_id": { + "apache-webserver": [ + { + "id": "load-balancer\u003eweb-application-traffic", + "source_id": "load-balancer", + "target_id": "apache-webserver", + "title": "Web Application Traffic", + "description": "Link to the web server", + "protocol": "http", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": 
"jenkins-buildserver\u003eapplication-deployment", + "source_id": "jenkins-buildserver", + "target_id": "apache-webserver", + "title": "Application Deployment", + "description": "Link to the Apache webserver", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "contract-fileserver": [ + { + "id": "erp-system\u003enfs-filesystem-access", + "source_id": "erp-system", + "target_id": "contract-fileserver", + "title": "NFS Filesystem Access", + "description": "Link to the file system", + "protocol": "nfs", + "data_assets_sent": [ + "customer-contracts" + ], + "data_assets_received": [ + "customer-contracts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "erp-system": [ + { + "id": "backend-admin-client\u003eerp-web-access", + "source_id": "backend-admin-client", + "target_id": "erp-system", + "title": "ERP Web Access", + "description": "Link to the ERP system (Web)", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "erp-customizing" + ], + "data_assets_received": [ + "erp-logs" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backoffice-client\u003eerp-internal-access", + "source_id": "backoffice-client", + "target_id": "erp-system", + "title": "ERP Internal Access", + "description": "Link to the ERP system", + "protocol": "https", + "tags": [ + "some-erp" + ], + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "internal-business-data" + ], + "data_assets_received": [ + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": 
"apache-webserver\u003eerp-system-traffic", + "source_id": "apache-webserver", + "target_id": "erp-system", + "title": "ERP System Traffic", + "description": "Link to the ERP system", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "git-repo": [ + { + "id": "external-dev-client\u003egit-repo-code-write-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Code Write Access", + "description": "Link to the Git repo", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "external-dev-client\u003egit-repo-web-ui-access", + "source_id": "external-dev-client", + "target_id": "git-repo", + "title": "Git-Repo Web-UI Access", + "description": "Link to the Git repo", + "protocol": "https", + "authentication": "token", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "client-application-code", + "server-application-code" + ], + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "jenkins-buildserver\u003egit-repo-code-read-access", + "source_id": "jenkins-buildserver", + "target_id": "git-repo", + "title": "Git Repo Code Read Access", + "description": "Link to the Git repository server", + "protocol": "ssh", + 
"readonly": true, + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_received": [ + "client-application-code", + "server-application-code" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "identity-provider": [ + { + "id": "apache-webserver\u003eauth-credential-check-traffic", + "source_id": "apache-webserver", + "target_id": "identity-provider", + "title": "Auth Credential Check Traffic", + "description": "Link to the identity provider server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "jenkins-buildserver": [ + { + "id": "external-dev-client\u003ejenkins-web-ui-access", + "source_id": "external-dev-client", + "target_id": "jenkins-buildserver", + "title": "Jenkins Web-UI Access", + "description": "Link to the Jenkins build server", + "protocol": "https", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "build-job-config" + ], + "data_assets_received": [ + "build-job-config" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "ldap-auth-server": [ + { + "id": "identity-provider\u003eldap-credential-check-traffic", + "source_id": "identity-provider", + "target_id": "ldap-auth-server", + "title": "LDAP Credential Check Traffic", + "description": "Link to the LDAP server", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "marketing-cms\u003eauth-traffic", + "source_id": "marketing-cms", + "target_id": "ldap-auth-server", + "title": "Auth Traffic", + "description": "Link to the LDAP auth server", + "protocol": "ldap", + 
"readonly": true, + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backend-admin-client\u003euser-management-access", + "source_id": "backend-admin-client", + "target_id": "ldap-auth-server", + "title": "User Management Access", + "description": "Link to the LDAP auth server for managing users", + "protocol": "ldaps", + "authentication": "credentials", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "customer-accounts" + ], + "data_assets_received": [ + "customer-accounts" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "load-balancer": [ + { + "id": "customer-client\u003ecustomer-traffic", + "source_id": "customer-client", + "target_id": "load-balancer", + "title": "Customer Traffic", + "description": "Link to the load balancer", + "protocol": "https", + "authentication": "session-id", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "customer-contracts", + "client-application-code", + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "marketing-cms": [ + { + "id": "load-balancer\u003ecms-content-traffic", + "source_id": "load-balancer", + "target_id": "marketing-cms", + "title": "CMS Content Traffic", + "description": "Link to the CMS server", + "protocol": "http", + "readonly": true, + "data_assets_received": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "backoffice-client\u003emarketing-cms-editing", + "source_id": "backoffice-client", + "target_id": "marketing-cms", + "title": "Marketing CMS Editing", + 
"description": "Link to the CMS for editing content", + "protocol": "https", + "vpn": true, + "authentication": "token", + "authorization": "enduser-identity-propagation", + "data_assets_sent": [ + "marketing-material" + ], + "data_assets_received": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "jenkins-buildserver\u003ecms-updates", + "source_id": "jenkins-buildserver", + "target_id": "marketing-cms", + "title": "CMS Updates", + "description": "Link to the CMS", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "marketing-material" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ], + "sql-database": [ + { + "id": "backend-admin-client\u003edb-update-access", + "source_id": "backend-admin-client", + "target_id": "sql-database", + "title": "DB Update Access", + "description": "Link to the database (JDBC tunneled via SSH)", + "protocol": "ssh", + "authentication": "client-certificate", + "authorization": "technical-user", + "usage": "devops", + "data_assets_sent": [ + "db-dumps" + ], + "data_assets_received": [ + "db-dumps", + "erp-logs", + "customer-accounts", + "customer-operational-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + }, + { + "id": "erp-system\u003edatabase-traffic", + "source_id": "erp-system", + "target_id": "sql-database", + "title": "Database Traffic", + "description": "Link to the DB system", + "protocol": "jdbc", + "authentication": "credentials", + "authorization": "technical-user", + "data_assets_sent": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "data_assets_received": [ + "customer-accounts", + "customer-operational-data", + "internal-business-data" + ], + "diagram_tweak_weight": 1, + "diagram_tweak_constraint": true + } + ] + }, + 
"direct_containing_trust_boundary_mapped_by_technical_asset_id": { + "apache-webserver": { + "id": "web-dmz", + "title": "Web DMZ", + "description": "Web DMZ", + "type": "network-cloud-security-group", + "technical_assets_inside": [ + "apache-webserver", + "marketing-cms" + ] + }, + "backend-admin-client": { + "id": "dev-network", + "title": "Dev Network", + "description": "Development Network", + "technical_assets_inside": [ + "jenkins-buildserver", + "git-repo", + "backend-admin-client", + "backoffice-client" + ] + }, + "backoffice-client": { + "id": "dev-network", + "title": "Dev Network", + "description": "Development Network", + "technical_assets_inside": [ + "jenkins-buildserver", + "git-repo", + "backend-admin-client", + "backoffice-client" + ] + }, + "contract-fileserver": { + "id": "erp-dmz", + "title": "ERP DMZ", + "description": "ERP DMZ", + "type": "network-cloud-security-group", + "tags": [ + "some-erp" + ], + "technical_assets_inside": [ + "erp-system", + "contract-fileserver", + "sql-database" + ] + }, + "erp-system": { + "id": "erp-dmz", + "title": "ERP DMZ", + "description": "ERP DMZ", + "type": "network-cloud-security-group", + "tags": [ + "some-erp" + ], + "technical_assets_inside": [ + "erp-system", + "contract-fileserver", + "sql-database" + ] + }, + "git-repo": { + "id": "dev-network", + "title": "Dev Network", + "description": "Development Network", + "technical_assets_inside": [ + "jenkins-buildserver", + "git-repo", + "backend-admin-client", + "backoffice-client" + ] + }, + "identity-provider": { + "id": "auth-env", + "title": "Auth Handling Environment", + "description": "Auth Handling Environment", + "type": "execution-environment", + "technical_assets_inside": [ + "identity-provider", + "ldap-auth-server" + ] + }, + "jenkins-buildserver": { + "id": "dev-network", + "title": "Dev Network", + "description": "Development Network", + "technical_assets_inside": [ + "jenkins-buildserver", + "git-repo", + "backend-admin-client", + 
"backoffice-client" + ] + }, + "ldap-auth-server": { + "id": "auth-env", + "title": "Auth Handling Environment", + "description": "Auth Handling Environment", + "type": "execution-environment", + "technical_assets_inside": [ + "identity-provider", + "ldap-auth-server" + ] + }, + "load-balancer": { + "id": "application-network", + "title": "Application Network", + "description": "Application Network", + "type": "network-cloud-provider", + "tags": [ + "aws" + ], + "technical_assets_inside": [ + "load-balancer" + ], + "trust_boundaries_nested": [ + "web-dmz", + "erp-dmz", + "auth-env" + ] + }, + "marketing-cms": { + "id": "web-dmz", + "title": "Web DMZ", + "description": "Web DMZ", + "type": "network-cloud-security-group", + "technical_assets_inside": [ + "apache-webserver", + "marketing-cms" + ] + }, + "sql-database": { + "id": "erp-dmz", + "title": "ERP DMZ", + "description": "ERP DMZ", + "type": "network-cloud-security-group", + "tags": [ + "some-erp" + ], + "technical_assets_inside": [ + "erp-system", + "contract-fileserver", + "sql-database" + ] + } + }, + "generated_risks_by_category": { + "something-strange": [ + { + "category": "something-strange", + "severity": "critical", + "exploitation_likelihood": "likely", + "exploitation_impact": "medium", + "title": "\u003cb\u003eExample Individual Risk\u003c/b\u003e at \u003cb\u003eDatabase\u003c/b\u003e", + "synthetic_id": "something-strange@sql-database", + "most_relevant_technical_asset": "sql-database", + "data_breach_probability": "probable", + "data_breach_technical_assets": [ + "sql-database" + ] + }, + { + "category": "something-strange", + "severity": "medium", + "exploitation_likelihood": "frequent", + "exploitation_impact": "very-high", + "title": "\u003cb\u003eExample Individual Risk\u003c/b\u003e at \u003cb\u003eContract Filesystem\u003c/b\u003e", + "synthetic_id": "something-strange@contract-fileserver", + "most_relevant_technical_asset": "contract-fileserver" + } + ] + } +} \ No newline at end of file 
diff --git a/test/all.yaml b/test/all.yaml new file mode 100644 index 00000000..77815f77 --- /dev/null +++ b/test/all.yaml @@ -0,0 +1,1354 @@ +threagile_version: 1.0.0 + +# NOTE: +# +# For a perfect editing experience within your IDE of choice you can easily +# get model syntax validation and autocompletion (very handy for enum values) +# as well as live templates: Just import the schema.json into your IDE and assign +# it as "schema" to each Threagile YAML file. Also try to import individual parts +# from the live-templates.txt file into your IDE as live editing templates. +# +# You might also want to try the REST API when running in server mode... + + + +title: Some Example Application + +date: 2020-07-01 + +author: + name: John Doe + homepage: www.example.com + + + + +management_summary_comment: > + Just some more custom summary possible here... + +business_criticality: important # values: archive, operational, important, critical, mission-critical + + + + +business_overview: + description: Some more demo text here and even images... + images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 + + +technical_overview: + description: Some more demo text here and even images... + images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 + + + +questions: # simply use "" as answer to signal "unanswered" + How are the admin clients managed/protected against compromise?: "" + How are the development clients managed/protected against compromise?: > + Managed by XYZ + How are the build pipeline components managed/protected against compromise?: > + Managed by XYZ + + + +abuse_cases: + Denial-of-Service: > + As a hacker I want to disturb the functionality of the backend system in order to cause indirect + financial damage via unusable features. + CPU-Cycle Theft: > + As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners. 
+ Ransomware: > + As a hacker I want to encrypt the storage and file systems in order to demand ransom. + Identity Theft: > + As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside. + PII Theft: > + As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage + their repudiation by publishing them. + + ERP-System Compromise: > + As a hacker I want to access the ERP-System in order to steal/modify sensitive business data. + Database Compromise: > + As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive + business data. + Contract Filesystem Compromise: > + As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data. + Cross-Site Scripting Attacks: > + As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and + cause reputational damage. + Denial-of-Service of Enduser Functionality: > + As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial + damage (lower sales). + Denial-of-Service of ERP/DB Functionality: > + As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect + financial damage via unusable internal ERP features (not related to customer portal). + + +security_requirements: + Input Validation: Strict input validation is required to reduce the overall attack surface. + Securing Administrative Access: Administrative access must be secured with strong encryption and multi-factor authentication. + EU-DSGVO: Mandatory EU-Datenschutzgrundverordnung + + +# Tags can be used for anything, it's just a tag. Also risk rules can act based on tags if you like. 
+# Tags can be used for example to name the products used (which is more concrete than the technology types that only specify the type) +tags_available: + - linux + - apache + - mysql + - jboss + - keycloak + - jenkins + - git + - oracle + - some-erp + - vmware + - aws + - aws:ec2 + - aws:s3 + + + + +data_assets: + + + Customer Contracts: &customer-contracts # this example shows the inheritance-like features of YAML + id: customer-contracts + description: Customer Contracts (PDF) + usage: business # values: business, devops + tags: + origin: Customer + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). The integrity and + availability of contract data is required for clearing payment disputes. + + + Customer Contract Summaries: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: contract-summaries + description: Customer Contract Summaries + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Just some summaries. 
+ + + Customer Operational Data: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-operational-data + description: Customer Operational Data + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer operational data for using the portal are required to be available to offer the portal functionality + and are used in the backend transactions. + + + Customer Accounts: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-accounts + description: Customer Accounts (including transient credentials when entered for checking them) + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer account data for using the portal are required to be available to offer the portal functionality. + + + Some Internal Business Data: + id: internal-business-data + description: Internal business data of the ERP system used unrelated to the customer-facing processes. + usage: business # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for + internal non-customer-portal-related stuff). 
+ + + Client Application Code: &client-application-code # this example shows the inheritance-like features of YAML + id: client-application-code + description: Angular and other client-side code delivered by the application. + usage: devops # values: business, devops + tags: + origin: Company ABC + owner: Company ABC + quantity: very-few # values: very-few, few, many, very-many + confidentiality: public # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the public data is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Server Application Code: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: server-application-code + description: API and other server-side code of the application. + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the API code is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Build Job Config: + id: build-job-config + description: Data for customizing of the build job system. 
+ usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the build job system. + + + Marketing Material: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: marketing-material + description: Website and marketing data to inform potential customers and generate new leads. + integrity: important # values: archive, operational, important, critical, mission-critical + + + ERP Logs: + id: erp-logs + description: Logs generated by the ERP system. + usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: archive # values: archive, operational, important, critical, mission-critical + availability: archive # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard + transactional logs. + + + ERP Customizing Data: + id: erp-customizing + description: Data for customizing of the ERP system. 
+ usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the ERP system. + + + Database Customizing and Dumps: + id: db-dumps + description: Data for customizing of the DB system, which might include full database dumps. + usage: devops # values: business, devops + tags: + - oracle + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the DB system, which might include full database dumps. 
+ + + + + + +technical_assets: + + + Customer Web Client: + id: customer-client + description: Customer Web Client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by enduser customer + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Customer + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by the customer to access the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Customer Traffic: + target: load-balancer + description: Link to the load balancer + protocol: https # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backoffice Client: + id: backoffice-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backoffice client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by Company XYZ company + size: component # values: system, service, application, component + technology: desktop # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + 
confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer and use the system. + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-contracts + - internal-business-data + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Internal Access: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + - some-erp + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Marketing CMS Editing: + target: marketing-cms + description: Link to the CMS for editing content + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence 
of IDs to reference + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backend Admin Client: + id: backend-admin-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backend admin client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by ops provider + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Web Access: + target: erp-system + description: Link to the ERP system (Web) + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - erp-customizing + data_assets_received: # sequence of IDs to reference + - erp-logs + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + DB Update Access: + target: sql-database + description: Link to the database (JDBC tunneled via SSH) + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - db-dumps + data_assets_received: # sequence of IDs to reference + - db-dumps + - erp-logs + - customer-accounts + - customer-operational-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + User Management Access: + target: ldap-auth-server + description: Link to the LDAP auth server for managing users + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: 
+ vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Load Balancer: + id: load-balancer + #diagram_tweak_order: 50 # affects left to right positioning (only within a trust boundary) + description: Load Balancer (HA-Proxy) + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: load-balancer # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ + usages of the portal and ERP system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Web Application Traffic: + target: apache-webserver + description: Link to the web server + protocol: http # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + CMS Content Traffic: + target: marketing-cms + description: Link to the CMS server + protocol: http # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - marketing-material + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + + + Apache Webserver: + id: apache-webserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Apache Webserver hosting 
the API code and client-side code + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: web-server # values: see help + tags: + - linux + - apache + - aws:ec2 + internet: false + machine: container # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. 
+ multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - json + - file + communication_links: + ERP System Traffic: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + Auth Credential Check Traffic: + target: identity-provider + description: Link to the identity provider server + protocol: https # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + Identity Provider: + id: identity-provider + #diagram_tweak_order: 0 # affects left to right positioning (only 
within a trust boundary) + description: Identity provider server + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-provider # values: see help + tags: + - linux + - jboss + - keycloak + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + LDAP Credential Check Traffic: + target: ldap-auth-server + description: Link to the LDAP server + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + LDAP Auth Server: + id: ldap-auth-server + #diagram_tweak_order: 0 # 
affects left to right positioning (only within a trust boundary) + description: LDAP authentication server + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-store-ldap # values: see help + tags: + - linux + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: transparent # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - customer-accounts + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + + + Marketing CMS: + id: marketing-cms + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: CMS for the marketing content + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: cms # values: see help + tags: + - linux + internet: false + machine: container # values: physical, virtual, container, serverless + 
encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. + multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - marketing-material + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Auth Traffic: + target: ldap-auth-server + description: Link to the LDAP auth server + protocol: ldap # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + + + Backoffice ERP System: + id: erp-system + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: ERP system + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, 
service, application, component + technology: erp # values: see help + tags: + - linux + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other + Company XYZ internal processes. + multi_tenant: false + redundant: true + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - erp-customizing + data_assets_stored: # sequence of IDs to reference + - erp-logs + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - xml + - file + - serialization + communication_links: + Database Traffic: + target: sql-database + description: Link to the DB system + protocol: jdbc # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + 
#diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + NFS Filesystem Access: + target: contract-fileserver + description: Link to the file system + protocol: nfs # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-contracts + data_assets_received: # sequence of IDs to reference + - customer-contracts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Contract Fileserver: + id: contract-fileserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: NFS Filesystem for storing the contract PDFs + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: file-server # values: see help + tags: + - linux + - aws:s3 + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). 
The integrity and + availability of contract data is required for clearing payment disputes. The filesystem is also required to be available + for storing new contracts of freshly generated customers. + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + data_assets_stored: # sequence of IDs to reference + - customer-contracts + - contract-summaries + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + + + Customer Contract Database: + id: sql-database + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: The database behind the ERP system + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: database # values: see help + tags: + - linux + - mysql + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: data-with-symmetric-shared-key # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also + for other Company XYZ internal processes. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - db-dumps + data_assets_stored: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + + + External Development Client: + id: external-dev-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: External developer client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by external developers + size: system # values: system, service, application, component + technology: devops-client # values: see help + tags: + - linux + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: External Developers + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The clients used by external developers to create parts of the application code. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + Git-Repo Code Write Access: + target: git-repo + description: Link to the Git repo + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Git-Repo Web-UI Access: + target: git-repo + description: Link to the Git repo + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Jenkins Web-UI Access: + target: jenkins-buildserver + description: Link to the Jenkins build server + protocol: https # values: see help + authentication: credentials # 
values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - build-job-config + data_assets_received: # sequence of IDs to reference + - build-job-config + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Git Repository: + id: git-repo + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Git repository server + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: sourcecode-repository # values: see help + tags: + - linux + - git + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + + + Jenkins Buildserver: + id: jenkins-buildserver + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Jenkins buildserver + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: build-pipeline # values: see help + tags: + - linux + - jenkins + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk + of reputation damage and application update unavailability when the build pipeline is compromised. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + - serialization + communication_links: + Git Repo Code Read Access: + target: git-repo + description: Link to the Git repository server + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Application Deployment: + target: apache-webserver + description: Link to the Apache webserver + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + CMS Updates: + target: marketing-cms + description: Link to the CMS + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, 
client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence of IDs to reference + + + + + +trust_boundaries: + + + Web DMZ: + id: web-dmz + description: Web DMZ + type: network-cloud-security-group # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - apache-webserver + - marketing-cms + trust_boundaries_nested: # sequence of IDs to reference + + + ERP DMZ: + id: erp-dmz + description: ERP DMZ + type: network-cloud-security-group # values: see help + tags: + - some-erp + technical_assets_inside: # sequence of IDs to reference + - erp-system + - contract-fileserver + - sql-database + trust_boundaries_nested: # sequence of IDs to reference + + + Application Network: + id: application-network + description: Application Network + type: network-cloud-provider # values: see help + tags: + - aws + technical_assets_inside: # sequence of IDs to reference + - load-balancer + trust_boundaries_nested: # sequence of IDs to reference + - web-dmz + - erp-dmz + - auth-env + + + Auth Handling Environment: + id: auth-env + description: Auth Handling Environment + type: execution-environment # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - identity-provider + - ldap-auth-server + trust_boundaries_nested: # sequence of IDs to reference + + + Dev Network: + id: dev-network + description: Development Network + type: network-on-prem # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - jenkins-buildserver + - git-repo + - backend-admin-client + - backoffice-client + trust_boundaries_nested: # sequence of IDs to reference + + + + + +shared_runtimes: + + + WebApp and Backoffice Virtualization: + id: 
webapp-virtualization + description: WebApp Virtualization + tags: + - vmware + technical_assets_running: # sequence of IDs to reference + - apache-webserver + - marketing-cms + - erp-system + - contract-fileserver + - sql-database + + + + +individual_risk_categories: # used for adding custom manually identified risks + + + Some Individual Risk Example: + id: something-strange + description: Some text describing the risk category... + impact: Some text describing the impact... + asvs: V0 - Something Strange + cheat_sheet: https://example.com + action: Some text describing the action... + mitigation: Some text describing the mitigation... + check: Check if XYZ... + function: business-side # values: business-side, architecture, development, operations + stride: repudiation # values: spoofing, tampering, repudiation, information-disclosure, denial-of-service, elevation-of-privilege + detection_logic: Some text describing the detection logic... + risk_assessment: Some text describing the risk assessment... + false_positives: Some text describing the most common types of false positives... 
+ model_failure_possible_reason: false + cwe: 693 + risks_identified: + Example Individual Risk at Database: + severity: critical # values: low, medium, elevated, high, critical + exploitation_likelihood: likely # values: unlikely, likely, very-likely, frequent + exploitation_impact: medium # values: low, medium, high, very-high + data_breach_probability: probable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + - sql-database + most_relevant_data_asset: + most_relevant_technical_asset: sql-database + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + Example Individual Risk at Contract Filesystem: + severity: medium # values: low, medium, elevated, high, critical + exploitation_likelihood: frequent # values: unlikely, likely, very-likely, frequent + exploitation_impact: very-high # values: low, medium, high, very-high + data_breach_probability: improbable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + most_relevant_data_asset: + most_relevant_technical_asset: contract-fileserver + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + + + +# NOTE: +# For risk tracking each risk-id needs to be defined (the string with the @ sign in it). These unique risk IDs +# are visible in the PDF report (the small grey string under each risk), the Excel (column "ID"), as well as the JSON responses. +# Some risk IDs have only one @ sign in them, while others multiple. The idea is to allow for unique but still speaking IDs. +# Therefore each risk instance creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. +# Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. 
It is best to look up the IDs
unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures are being implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + + +#diagram_tweak_edge_layout: spline # values: spline, polyline, false, ortho (this suppresses edge labels), curved (this suppresses edge labels and can cause problems with edges) + +#diagram_tweak_suppress_edge_labels: true +#diagram_tweak_layout_left_to_right: true +#diagram_tweak_nodesep: 2 +#diagram_tweak_ranksep: 2 +#diagram_tweak_invisible_connections_between_assets: +# - tech-asset-source-id-A:tech-asset-target-id-B +# - tech-asset-source-id-C:tech-asset-target-id-D +#diagram_tweak_same_rank_assets: +# - tech-asset-source-id-E:tech-asset-target-id-F:tech-asset-source-id-G:tech-asset-target-id-H +# - tech-asset-source-id-M:tech-asset-target-id-N:tech-asset-source-id-O diff --git a/test/data_assets.yaml b/test/data_assets.yaml new file mode 100644 index 00000000..43bc0c7d --- /dev/null +++ b/test/data_assets.yaml @@ -0,0 +1,164 @@ + +data_assets: + + + Customer Contracts: &customer-contracts # this example shows the inheritance-like features of YAML + id: customer-contracts + description: Customer Contracts (PDF) + usage: business # values: business, devops + tags: + origin: Customer + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). The integrity and + availability of contract data is required for clearing payment disputes. 
+ + + Customer Contract Summaries: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: contract-summaries + description: Customer Contract Summaries + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Just some summaries. + + + Customer Operational Data: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-operational-data + description: Customer Operational Data + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer operational data for using the portal are required to be available to offer the portal functionality + and are used in the backend transactions. + + + Customer Accounts: + <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values + id: customer-accounts + description: Customer Accounts (including transient credentials when entered for checking them) + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Customer account data for using the portal are required to be available to offer the portal functionality. + + + Some Internal Business Data: + id: internal-business-data + description: Internal business data of the ERP system used unrelated to the customer-facing processes. 
+ usage: business # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for + internal non-customer-portal-related stuff). + + + Client Application Code: &client-application-code # this example shows the inheritance-like features of YAML + id: client-application-code + description: Angular and other client-side code delivered by the application. + usage: devops # values: business, devops + tags: + origin: Company ABC + owner: Company ABC + quantity: very-few # values: very-few, few, many, very-many + confidentiality: public # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the public data is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Server Application Code: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: server-application-code + description: API and other server-side code of the application. 
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The integrity of the API code is critical to avoid reputational damage and the availability is important on the + long-term scale (but not critical) to keep the growth rate of the customer base steady. + + + Build Job Config: + id: build-job-config + description: Data for customizing of the build job system. + usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the build job system. + + + Marketing Material: + <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values + id: marketing-material + description: Website and marketing data to inform potential customers and generate new leads. + integrity: important # values: archive, operational, important, critical, mission-critical + + + ERP Logs: + id: erp-logs + description: Logs generated by the ERP system. 
+ usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: many # values: very-few, few, many, very-many + confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential + integrity: archive # values: archive, operational, important, critical, mission-critical + availability: archive # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard + transactional logs. + + + ERP Customizing Data: + id: erp-customizing + description: Data for customizing of the ERP system. + usage: devops # values: business, devops + tags: + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the ERP system. + + + Database Customizing and Dumps: + id: db-dumps + description: Data for customizing of the DB system, which might include full database dumps. + usage: devops # values: business, devops + tags: + - oracle + origin: Company XYZ + owner: Company XYZ + quantity: very-few # values: very-few, few, many, very-many + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Data for customizing of the DB system, which might include full database dumps. 
+ diff --git a/test/diagram_tweak.yaml b/test/diagram_tweak.yaml new file mode 100644 index 00000000..eff09615 --- /dev/null +++ b/test/diagram_tweak.yaml @@ -0,0 +1,13 @@ + +#diagram_tweak_edge_layout: spline # values: spline, polyline, false, ortho (this suppresses edge labels), curved (this suppresses edge labels and can cause problems with edges) + +#diagram_tweak_suppress_edge_labels: true +#diagram_tweak_layout_left_to_right: true +#diagram_tweak_nodesep: 2 +#diagram_tweak_ranksep: 2 +#diagram_tweak_invisible_connections_between_assets: +# - tech-asset-source-id-A:tech-asset-target-id-B +# - tech-asset-source-id-C:tech-asset-target-id-D +#diagram_tweak_same_rank_assets: +# - tech-asset-source-id-E:tech-asset-target-id-F:tech-asset-source-id-G:tech-asset-target-id-H +# - tech-asset-source-id-M:tech-asset-target-id-N:tech-asset-source-id-O diff --git a/test/main.yaml b/test/main.yaml new file mode 100644 index 00000000..a90256a3 --- /dev/null +++ b/test/main.yaml @@ -0,0 +1,27 @@ +threagile_version: 1.0.0 + +# NOTE: +# +# For a perfect editing experience within your IDE of choice you can easily +# get model syntax validation and autocompletion (very handy for enum values) +# as well as live templates: Just import the schema.json into your IDE and assign +# it as "schema" to each Threagile YAML file. Also try to import individual parts +# from the live-templates.txt file into your IDE as live editing templates. +# +# You might also want to try the REST API when running in server mode... 
+ +includes: + - meta.yaml + - overview.yaml + - questions.yaml + - abuse_cases.yaml + - security_requirements.yaml + - tags.yaml + - data_assets.yaml + - technical_assets.yaml + - trust_boundaries.yaml + - shared_runtimes.yaml + - risk_categories.yaml + - risk_tracking.yaml + - diagram_tweak.yaml + diff --git a/test/meta.yaml b/test/meta.yaml new file mode 100644 index 00000000..c368ce1a --- /dev/null +++ b/test/meta.yaml @@ -0,0 +1,9 @@ +title: Some Example Application + +date: 2020-07-01 + +author: + name: John Doe + homepage: www.example.com + +business_criticality: important # values: archive, operational, important, critical, mission-critical diff --git a/test/overview.yaml b/test/overview.yaml new file mode 100644 index 00000000..52b83ac7 --- /dev/null +++ b/test/overview.yaml @@ -0,0 +1,16 @@ +management_summary_comment: > + Just some more custom summary possible here... + + +business_overview: + description: Some more demo text here and even images... + images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 + + +technical_overview: + description: Some more demo text here and even images... 
+ images: +# - custom-image-1.png: Some dummy image 1 +# - custom-image-2.png: Some dummy image 2 diff --git a/test/questions.yaml b/test/questions.yaml new file mode 100644 index 00000000..4f8fc38a --- /dev/null +++ b/test/questions.yaml @@ -0,0 +1,6 @@ +questions: # simply use "" as answer to signal "unanswered" + How are the admin clients managed/protected against compromise?: "" + How are the development clients managed/protected against compromise?: > + Managed by XYZ + How are the build pipeline components managed/protected against compromise?: > + Managed by XYZ diff --git a/test/risk_categories.yaml b/test/risk_categories.yaml new file mode 100644 index 00000000..e63c87e8 --- /dev/null +++ b/test/risk_categories.yaml @@ -0,0 +1,44 @@ + +individual_risk_categories: # used for adding custom manually identified risks + + Some Individual Risk Example: + id: something-strange + description: Some text describing the risk category... + impact: Some text describing the impact... + asvs: V0 - Something Strange + cheat_sheet: https://example.com + action: Some text describing the action... + mitigation: Some text describing the mitigation... + check: Check if XYZ... + function: business-side # values: business-side, architecture, development, operations + stride: repudiation # values: spoofing, tampering, repudiation, information-disclosure, denial-of-service, elevation-of-privilege + detection_logic: Some text describing the detection logic... + risk_assessment: Some text describing the risk assessment... + false_positives: Some text describing the most common types of false positives... 
+ model_failure_possible_reason: false + cwe: 693 + risks_identified: + Example Individual Risk at Database: + severity: critical # values: low, medium, elevated, high, critical + exploitation_likelihood: likely # values: unlikely, likely, very-likely, frequent + exploitation_impact: medium # values: low, medium, high, very-high + data_breach_probability: probable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + - sql-database + most_relevant_data_asset: + most_relevant_technical_asset: sql-database + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + Example Individual Risk at Contract Filesystem: + severity: medium # values: low, medium, elevated, high, critical + exploitation_likelihood: frequent # values: unlikely, likely, very-likely, frequent + exploitation_impact: very-high # values: low, medium, high, very-high + data_breach_probability: improbable # values: improbable, possible, probable + data_breach_technical_assets: # list of technical asset IDs which might have data breach + most_relevant_data_asset: + most_relevant_technical_asset: contract-fileserver + most_relevant_communication_link: + most_relevant_trust_boundary: + most_relevant_shared_runtime: + diff --git a/test/risk_tracking.yaml b/test/risk_tracking.yaml new file mode 100644 index 00000000..30c84a11 --- /dev/null +++ b/test/risk_tracking.yaml @@ -0,0 +1,52 @@ + +# NOTE: +# For risk tracking each risk-id needs to be defined (the string with the @ sign in it). These unique risk IDs +# are visible in the PDF report (the small grey string under each risk), the Excel (column "ID"), as well as the JSON responses. +# Some risk IDs have only one @ sign in them, while others multiple. The idea is to allow for unique but still speaking IDs. 
+# Therefore each risk instance creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. +# Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. Best is to lookup the IDs +# to use in the created Excel file. Alternatively a model macro "seed-risk-tracking" is available that helps in initially +# seeding the risk tracking part here based on already identified and not yet handled risks. +risk_tracking: + + untrusted-deserialization@erp-system: # wildcards "*" between the @ characters are possible + status: accepted # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: Risk accepted as tolerable + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + ldap-injection@*@ldap-auth-server@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-5678 + date: 2020-01-05 + checked_by: John Doe + + unencrypted-asset@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + missing-authentication-second-factor@*@*@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + missing-hardening@*: # wildcards "*" between the @ characters are possible + status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: 
The hardening measures were implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe + + dos-risky-access-across-trust-boundary@*@*@*: # wildcards "*" between the @ characters are possible + status: in-progress # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive + justification: The hardening measures are being implemented and checked + ticket: XYZ-1234 + date: 2020-01-04 + checked_by: John Doe diff --git a/test/security_requirements.yaml b/test/security_requirements.yaml new file mode 100644 index 00000000..d8905dfc --- /dev/null +++ b/test/security_requirements.yaml @@ -0,0 +1,4 @@ +security_requirements: + Input Validation: Strict input validation is required to reduce the overall attack surface. + Securing Administrative Access: Administrative access must be secured with strong encryption and multi-factor authentication. + EU-DSGVO: Mandatory EU-Datenschutzgrundverordnung diff --git a/test/shared_runtimes.yaml b/test/shared_runtimes.yaml new file mode 100644 index 00000000..c9bee4d6 --- /dev/null +++ b/test/shared_runtimes.yaml @@ -0,0 +1,16 @@ + +shared_runtimes: + + + WebApp and Backoffice Virtualization: + id: webapp-virtualization + description: WebApp Virtualization + tags: + - vmware + technical_assets_running: # sequence of IDs to reference + - apache-webserver + - marketing-cms + - erp-system + - contract-fileserver + - sql-database + diff --git a/test/tags.yaml b/test/tags.yaml new file mode 100644 index 00000000..527412e5 --- /dev/null +++ b/test/tags.yaml @@ -0,0 +1,16 @@ +# Tags can be used for anything, it's just a tag. Also risk rules can act based on tags if you like. 
+# Tags can be used for example to name the products used (which is more concrete than the technology types that only specify the type) +tags_available: + - linux + - apache + - mysql + - jboss + - keycloak + - jenkins + - git + - oracle + - some-erp + - vmware + - aws + - aws:ec2 + - aws:s3 diff --git a/test/technical_assets.yaml b/test/technical_assets.yaml new file mode 100644 index 00000000..427d2806 --- /dev/null +++ b/test/technical_assets.yaml @@ -0,0 +1,6 @@ +includes: + - technical_assets_clients.yaml + - technical_assets_infrastructure.yaml + - technical_assets_servers.yaml + - technical_assets_databases.yaml + - technical_assets_devops.yaml diff --git a/test/technical_assets_clients.yaml b/test/technical_assets_clients.yaml new file mode 100644 index 00000000..d9091f13 --- /dev/null +++ b/test/technical_assets_clients.yaml @@ -0,0 +1,211 @@ + +technical_assets: + + Customer Web Client: + id: customer-client + description: Customer Web Client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by enduser customer + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Customer + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by the customer to access the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Customer Traffic: + target: load-balancer + description: Link to the load balancer + protocol: https # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backoffice Client: + id: backoffice-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backoffice client + type: external-entity # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by Company XYZ company + size: component # values: system, service, application, component + technology: desktop # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + 
confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer and use the system. + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-contracts + - internal-business-data + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Internal Access: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + - some-erp + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Marketing CMS Editing: + target: marketing-cms + description: Link to the CMS for editing content + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: true + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence 
of IDs to reference + - marketing-material + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Backend Admin Client: + id: backend-admin-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Backend admin client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by ops provider + size: component # values: system, service, application, component + technology: browser # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company XYZ + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: operational # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The client used by Company XYZ to administer the system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - erp-logs + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + ERP Web Access: + target: erp-system + description: Link to the ERP system (Web) + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - erp-customizing + data_assets_received: # sequence of IDs to reference + - erp-logs + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + DB Update Access: + target: sql-database + description: Link to the database (JDBC tunneled via SSH) + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - db-dumps + data_assets_received: # sequence of IDs to reference + - db-dumps + - erp-logs + - customer-accounts + - customer-operational-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + User Management Access: + target: ldap-auth-server + description: Link to the LDAP auth server for managing users + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: 
+ vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false diff --git a/test/technical_assets_databases.yaml b/test/technical_assets_databases.yaml new file mode 100644 index 00000000..54fe362c --- /dev/null +++ b/test/technical_assets_databases.yaml @@ -0,0 +1,71 @@ + +technical_assets: + + LDAP Auth Server: + id: ldap-auth-server + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: LDAP authentication server + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-store-ldap # values: see help + tags: + - linux + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: transparent # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - customer-accounts + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + 
communication_links: + + + Customer Contract Database: + id: sql-database + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: The database behind the ERP system + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: database # values: see help + tags: + - linux + - mysql + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: data-with-symmetric-shared-key # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also + for other Company XYZ internal processes. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - db-dumps + data_assets_stored: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: diff --git a/test/technical_assets_devops.yaml b/test/technical_assets_devops.yaml new file mode 100644 index 00000000..a6c0aa49 --- /dev/null +++ b/test/technical_assets_devops.yaml @@ -0,0 +1,223 @@ +includes: + - technical_assets_clients.yaml + +technical_assets: + + External Development Client: + id: external-dev-client + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: External developer client + type: external-entity # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: true + out_of_scope: true + justification_out_of_scope: Owned and managed by external developers + size: system # values: system, service, application, component + technology: devops-client # values: see help + tags: + - linux + internet: true + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: External Developers + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: operational # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The clients used by external developers to create parts of the application code. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + Git-Repo Code Write Access: + target: git-repo + description: Link to the Git repo + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Git-Repo Web-UI Access: + target: git-repo + description: Link to the Git repo + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Jenkins Web-UI Access: + target: jenkins-buildserver + description: Link to the Jenkins build server + protocol: https # values: see help + authentication: credentials # 
values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - build-job-config + data_assets_received: # sequence of IDs to reference + - build-job-config + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Git Repository: + id: git-repo + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Git repository server + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: sourcecode-repository # values: see help + tags: + - linux + - git + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: + + + Jenkins Buildserver: + id: jenkins-buildserver + #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary) + description: Jenkins buildserver + type: process # values: external-entity, process, datastore + usage: devops # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: build-pipeline # values: see help + tags: + - linux + - jenkins + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is + therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk + of reputation damage and application update unavailability when the build pipeline is compromised. 
+ multi_tenant: true + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + - build-job-config + - client-application-code + - server-application-code + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + - serialization + communication_links: + Git Repo Code Read Access: + target: git-repo + description: Link to the Git repository server + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - client-application-code + - server-application-code + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + Application Deployment: + target: apache-webserver + description: Link to the Apache webserver + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - client-application-code + - server-application-code + data_assets_received: # sequence of IDs to reference + CMS Updates: + target: marketing-cms + description: Link to the CMS + protocol: ssh # values: see help + authentication: client-certificate # values: none, credentials, session-id, token, 
client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: devops # values: business, devops + data_assets_sent: # sequence of IDs to reference + - marketing-material + data_assets_received: # sequence of IDs to reference + diff --git a/test/technical_assets_infrastructure.yaml b/test/technical_assets_infrastructure.yaml new file mode 100644 index 00000000..00e7b697 --- /dev/null +++ b/test/technical_assets_infrastructure.yaml @@ -0,0 +1,75 @@ + +technical_assets: + + Load Balancer: + id: load-balancer + #diagram_tweak_order: 50 # affects left to right positioning (only within a trust boundary) + description: Load Balancer (HA-Proxy) + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: load-balancer # values: see help + tags: + internet: false + machine: physical # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ + usages of the portal and ERP system. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - marketing-material + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Web Application Traffic: + target: apache-webserver + description: Link to the web server + protocol: http # values: see help + authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - client-application-code + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + CMS Content Traffic: + target: marketing-cms + description: Link to the CMS server + protocol: http # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + data_assets_received: # sequence of IDs to reference + - marketing-material + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false diff --git a/test/technical_assets_servers.yaml b/test/technical_assets_servers.yaml new file mode 100644 index 00000000..b8e025b3 --- /dev/null +++ 
b/test/technical_assets_servers.yaml @@ -0,0 +1,295 @@ + +technical_assets: + + Apache Webserver: + id: apache-webserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: Apache Webserver hosting the API code and client-side code + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: web-server # values: see help + tags: + - linux + - apache + - aws:ec2 + internet: false + machine: container # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. 
+ multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - client-application-code + - server-application-code + data_assets_stored: # sequence of IDs to reference + - client-application-code + - server-application-code + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - json + - file + communication_links: + ERP System Traffic: + target: erp-system + description: Link to the ERP system + protocol: https # values: see help + authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + Auth Credential Check Traffic: + target: identity-provider + description: Link to the identity provider server + protocol: https # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + Identity Provider: + id: identity-provider + #diagram_tweak_order: 0 # affects left to right positioning (only 
within a trust boundary) + description: Identity provider server + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: identity-provider # values: see help + tags: + - linux + - jboss + - keycloak + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: critical # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The auth data of the application + multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + data_assets_stored: # sequence of IDs to reference + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + LDAP Credential Check Traffic: + target: ldap-auth-server + description: Link to the LDAP server + protocol: ldaps # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + + + Marketing CMS: + id: marketing-cms + #diagram_tweak_order: 0 # affects 
left to right positioning (only within a trust boundary) + description: CMS for the marketing content + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: application # values: system, service, application, component + technology: cms # values: see help + tags: + - linux + internet: false + machine: container # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential + integrity: important # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + The correct configuration and reachability of the web server is mandatory for all customer usages of the portal. 
+ multi_tenant: false + redundant: false + custom_developed_parts: true + data_assets_processed: # sequence of IDs to reference + - marketing-material + - customer-accounts + data_assets_stored: # sequence of IDs to reference + - marketing-material + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + communication_links: + Auth Traffic: + target: ldap-auth-server + description: Link to the LDAP auth server + protocol: ldap # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: true + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + data_assets_received: # sequence of IDs to reference + - customer-accounts + #diagram_tweak_weight: 5 + #diagram_tweak_constraint: false + + + Backoffice ERP System: + id: erp-system + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: ERP system + type: process # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: system # values: system, service, application, component + technology: erp # values: see help + tags: + - linux + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: mission-critical # values: archive, operational, important, critical, mission-critical + availability: mission-critical # values: 
archive, operational, important, critical, mission-critical + justification_cia_rating: > + The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other + Company XYZ internal processes. + multi_tenant: false + redundant: true + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - customer-contracts + - internal-business-data + - erp-customizing + data_assets_stored: # sequence of IDs to reference + - erp-logs + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - xml + - file + - serialization + communication_links: + Database Traffic: + target: sql-database + description: Link to the DB system + protocol: jdbc # values: see help + authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: technical-user # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + data_assets_received: # sequence of IDs to reference + - customer-accounts + - customer-operational-data + - internal-business-data + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + NFS Filesystem Access: + target: contract-fileserver + description: Link to the file system + protocol: nfs # values: see help + authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor + authorization: none # values: none, technical-user, enduser-identity-propagation + tags: + vpn: false + ip_filtered: false + readonly: false + usage: business # values: business, devops + data_assets_sent: # sequence of IDs to reference + - customer-contracts + data_assets_received: # sequence of IDs to reference 
+ - customer-contracts + #diagram_tweak_weight: 1 + #diagram_tweak_constraint: false + + + Contract Fileserver: + id: contract-fileserver + #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary) + description: NFS Filesystem for storing the contract PDFs + type: datastore # values: external-entity, process, datastore + usage: business # values: business, devops + used_as_client_by_human: false + out_of_scope: false + justification_out_of_scope: + size: component # values: system, service, application, component + technology: file-server # values: see help + tags: + - linux + - aws:s3 + internet: false + machine: virtual # values: physical, virtual, container, serverless + encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key + owner: Company ABC + confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential + integrity: critical # values: archive, operational, important, critical, mission-critical + availability: important # values: archive, operational, important, critical, mission-critical + justification_cia_rating: > + Contract data might contain financial data as well as personally identifiable information (PII). The integrity and + availability of contract data is required for clearing payment disputes. The filesystem is also required to be available + for storing new contracts of freshly generated customers. 
+ multi_tenant: false + redundant: false + custom_developed_parts: false + data_assets_processed: # sequence of IDs to reference + data_assets_stored: # sequence of IDs to reference + - customer-contracts + - contract-summaries + data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv + - file + communication_links: diff --git a/test/trust_boundaries.yaml b/test/trust_boundaries.yaml new file mode 100644 index 00000000..b6ae70fd --- /dev/null +++ b/test/trust_boundaries.yaml @@ -0,0 +1,67 @@ + +trust_boundaries: + + + Web DMZ: + id: web-dmz + description: Web DMZ + type: network-cloud-security-group # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - apache-webserver + - marketing-cms + trust_boundaries_nested: # sequence of IDs to reference + + + ERP DMZ: + id: erp-dmz + description: ERP DMZ + type: network-cloud-security-group # values: see help + tags: + - some-erp + technical_assets_inside: # sequence of IDs to reference + - erp-system + - contract-fileserver + - sql-database + trust_boundaries_nested: # sequence of IDs to reference + + + Application Network: + id: application-network + description: Application Network + type: network-cloud-provider # values: see help + tags: + - aws + technical_assets_inside: # sequence of IDs to reference + - load-balancer + trust_boundaries_nested: # sequence of IDs to reference + - web-dmz + - erp-dmz + - auth-env + + + Auth Handling Environment: + id: auth-env + description: Auth Handling Environment + type: execution-environment # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - identity-provider + - ldap-auth-server + trust_boundaries_nested: # sequence of IDs to reference + + + Dev Network: + id: dev-network + description: Development Network + type: network-on-prem # values: see help + tags: + technical_assets_inside: # sequence of IDs to reference + - jenkins-buildserver + - git-repo + - backend-admin-client + - 
backoffice-client + trust_boundaries_nested: # sequence of IDs to reference + + +