+*/
+
+package model
+
+import (
+ "testing"
+
+ "github.com/google/uuid"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/threagile/threagile/pkg/input"
+ "github.com/threagile/threagile/pkg/security/risks"
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+func TestDefaultInputNotFail(t *testing.T) {
+ parsedModel, err := ParseModel(createInputModel(make(map[string]input.TechnicalAsset), make(map[string]input.DataAsset)), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+
+ assert.NoError(t, err)
+ assert.NotNil(t, parsedModel)
+}
+
+func TestInferConfidentiality_NotSet_NoOthers_ExpectTODO(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+ // TODO: rename test and check if everyone agree that by default it should be public if there are no other assets
+
+ assert.NoError(t, err)
+}
+
+func TestInferConfidentiality_ExpectHighestConfidentiality(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ daConfidentialConfidentiality := createDataAsset(types.Confidential, types.Critical, types.Critical)
+ da[daConfidentialConfidentiality.ID] = daConfidentialConfidentiality
+
+ daRestrictedConfidentiality := createDataAsset(types.Restricted, types.Important, types.Important)
+ da[daRestrictedConfidentiality.ID] = daRestrictedConfidentiality
+
+ daPublicConfidentiality := createDataAsset(types.Public, types.Archive, types.Archive)
+ da[daPublicConfidentiality.ID] = daPublicConfidentiality
+
+ taWithConfidentialConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithConfidentialConfidentialityDataAsset.DataAssetsProcessed = append(taWithConfidentialConfidentialityDataAsset.DataAssetsProcessed, daConfidentialConfidentiality.ID)
+ ta[taWithConfidentialConfidentialityDataAsset.ID] = taWithConfidentialConfidentialityDataAsset
+
+ taWithRestrictedConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithRestrictedConfidentialityDataAsset.DataAssetsProcessed = append(taWithRestrictedConfidentialityDataAsset.DataAssetsProcessed, daRestrictedConfidentiality.ID)
+ ta[taWithRestrictedConfidentialityDataAsset.ID] = taWithRestrictedConfidentialityDataAsset
+
+ taWithPublicConfidentialityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithPublicConfidentialityDataAsset.DataAssetsProcessed = append(taWithPublicConfidentialityDataAsset.DataAssetsProcessed, daPublicConfidentiality.ID)
+ ta[taWithPublicConfidentialityDataAsset.ID] = taWithPublicConfidentialityDataAsset
+
+ parsedModel, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+
+ assert.NoError(t, err)
+ assert.Equal(t, types.Confidential, parsedModel.TechnicalAssets[taWithConfidentialConfidentialityDataAsset.ID].Confidentiality)
+ assert.Equal(t, types.Restricted, parsedModel.TechnicalAssets[taWithRestrictedConfidentialityDataAsset.ID].Confidentiality)
+ assert.Equal(t, types.Internal, parsedModel.TechnicalAssets[taWithPublicConfidentialityDataAsset.ID].Confidentiality)
+}
+
+func TestInferIntegrity_NotSet_NoOthers_ExpectTODO(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+ // TODO: rename test and check if everyone agree that by default it should be public if there are no other assets
+
+ assert.NoError(t, err)
+}
+
+func TestInferIntegrity_ExpectHighestIntegrity(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ daCriticalIntegrity := createDataAsset(types.Confidential, types.Critical, types.Critical)
+ da[daCriticalIntegrity.ID] = daCriticalIntegrity
+
+ daImportantIntegrity := createDataAsset(types.Restricted, types.Important, types.Important)
+ da[daImportantIntegrity.ID] = daImportantIntegrity
+
+ daArchiveIntegrity := createDataAsset(types.Public, types.Archive, types.Archive)
+ da[daArchiveIntegrity.ID] = daArchiveIntegrity
+
+ taWithCriticalIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithCriticalIntegrityDataAsset.DataAssetsProcessed = append(taWithCriticalIntegrityDataAsset.DataAssetsProcessed, daCriticalIntegrity.ID)
+ ta[taWithCriticalIntegrityDataAsset.ID] = taWithCriticalIntegrityDataAsset
+
+ taWithImportantIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithImportantIntegrityDataAsset.DataAssetsProcessed = append(taWithImportantIntegrityDataAsset.DataAssetsProcessed, daImportantIntegrity.ID)
+ ta[taWithImportantIntegrityDataAsset.ID] = taWithImportantIntegrityDataAsset
+
+ taWithArchiveIntegrityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithArchiveIntegrityDataAsset.DataAssetsProcessed = append(taWithArchiveIntegrityDataAsset.DataAssetsProcessed, daArchiveIntegrity.ID)
+ ta[taWithArchiveIntegrityDataAsset.ID] = taWithArchiveIntegrityDataAsset
+
+ parsedModel, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+
+ assert.NoError(t, err)
+ assert.Equal(t, types.Critical, parsedModel.TechnicalAssets[taWithCriticalIntegrityDataAsset.ID].Integrity)
+ assert.Equal(t, types.Important, parsedModel.TechnicalAssets[taWithImportantIntegrityDataAsset.ID].Integrity)
+ assert.Equal(t, types.Operational, parsedModel.TechnicalAssets[taWithArchiveIntegrityDataAsset.ID].Integrity)
+}
+
+func TestInferAvailability_NotSet_NoOthers_ExpectTODO(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ _, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+
+ assert.NoError(t, err)
+}
+
+func TestInferAvailability_ExpectHighestAvailability(t *testing.T) {
+ ta := make(map[string]input.TechnicalAsset)
+ da := make(map[string]input.DataAsset)
+
+ daCriticalAvailability := createDataAsset(types.Confidential, types.Critical, types.Critical)
+ da[daCriticalAvailability.ID] = daCriticalAvailability
+
+ daImportantAvailability := createDataAsset(types.Restricted, types.Important, types.Important)
+ da[daImportantAvailability.ID] = daImportantAvailability
+
+ daArchiveAvailability := createDataAsset(types.Public, types.Archive, types.Archive)
+ da[daArchiveAvailability.ID] = daArchiveAvailability
+
+ taWithCriticalAvailabilityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithCriticalAvailabilityDataAsset.DataAssetsProcessed = append(taWithCriticalAvailabilityDataAsset.DataAssetsProcessed, daCriticalAvailability.ID)
+ ta[taWithCriticalAvailabilityDataAsset.ID] = taWithCriticalAvailabilityDataAsset
+
+ taWithImportantAvailabilityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithImportantAvailabilityDataAsset.DataAssetsProcessed = append(taWithImportantAvailabilityDataAsset.DataAssetsProcessed, daImportantAvailability.ID)
+ ta[taWithImportantAvailabilityDataAsset.ID] = taWithImportantAvailabilityDataAsset
+
+ taWithArchiveAvailabilityDataAsset := createTechnicalAsset(types.Internal, types.Operational, types.Operational)
+ taWithArchiveAvailabilityDataAsset.DataAssetsProcessed = append(taWithArchiveAvailabilityDataAsset.DataAssetsProcessed, daArchiveAvailability.ID)
+ ta[taWithArchiveAvailabilityDataAsset.ID] = taWithArchiveAvailabilityDataAsset
+
+ parsedModel, err := ParseModel(createInputModel(ta, da), make(map[string]risks.RiskRule), make(map[string]*CustomRisk))
+
+ assert.NoError(t, err)
+ assert.Equal(t, types.Critical, parsedModel.TechnicalAssets[taWithCriticalAvailabilityDataAsset.ID].Availability)
+ assert.Equal(t, types.Important, parsedModel.TechnicalAssets[taWithImportantAvailabilityDataAsset.ID].Availability)
+ assert.Equal(t, types.Operational, parsedModel.TechnicalAssets[taWithArchiveAvailabilityDataAsset.ID].Availability)
+}
+
+func createInputModel(technicalAssets map[string]input.TechnicalAsset, dataAssets map[string]input.DataAsset) *input.Model {
+ return &input.Model{
+ TechnicalAssets: technicalAssets,
+ DataAssets: dataAssets,
+
+ // set some dummy values to bypass validation
+ BusinessCriticality: "archive",
+ }
+}
+
+func createTechnicalAsset(confidentiality types.Confidentiality, integrity types.Criticality, availability types.Criticality) input.TechnicalAsset {
+ return input.TechnicalAsset{
+ ID: uuid.New().String(),
+ // those values are required to bypass validation
+ Usage: "business",
+ Type: "process",
+ Size: "system",
+ Technology: "unknown-technology",
+ Encryption: "none",
+ Machine: "virtual",
+ Confidentiality: confidentiality.String(),
+ Integrity: integrity.String(),
+ Availability: availability.String(),
+ }
+}
+
+func createDataAsset(confidentiality types.Confidentiality, integrity types.Criticality, availability types.Criticality) input.DataAsset {
+ return input.DataAsset{
+ ID: uuid.New().String(),
+ Usage: "business",
+ Quantity: "few",
+ Confidentiality: confidentiality.String(),
+ Integrity: integrity.String(),
+ Availability: availability.String(),
+ }
+}
diff --git a/pkg/model/read.go b/pkg/model/read.go
new file mode 100644
index 00000000..814cf8f5
--- /dev/null
+++ b/pkg/model/read.go
@@ -0,0 +1,164 @@
+package model
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/threagile/threagile/pkg/common"
+ "github.com/threagile/threagile/pkg/input"
+ "github.com/threagile/threagile/pkg/security/risks"
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
// progressReporter is the minimal logging surface needed while reading and
// analyzing a model; any logger providing Info/Warn/Error satisfies it.
type progressReporter interface {
	Info(a ...any)
	Warn(a ...any)
	Error(a ...any)
}
+
// ReadResult bundles everything produced by ReadAndAnalyzeModel: the raw
// model input, the fully parsed model, the RAA intro text, and the risk
// rules (built-in and custom) that were applied.
type ReadResult struct {
	ModelInput       *input.Model
	ParsedModel      *types.ParsedModel
	IntroTextRAA     string
	BuiltinRiskRules map[string]risks.RiskRule
	CustomRiskRules  map[string]*CustomRisk
}
+
// TODO: consider about splitting this function into smaller ones for better reusability

// ReadAndAnalyzeModel loads the model yaml named in config, parses it,
// applies RAA calculation and risk generation (built-in plus plugin rules),
// evaluates wildcard risk tracking, and returns everything in a ReadResult.
// Any load/parse/tracking failure aborts with a wrapped error; RAA and risk
// generation problems are reported but do not abort.
func ReadAndAnalyzeModel(config common.Config, progressReporter progressReporter) (*ReadResult, error) {
	progressReporter.Info("Writing into output directory:", config.OutputFolder)
	progressReporter.Info("Parsing model:", config.InputFile)

	// index the built-in rules by their category id
	builtinRiskRules := make(map[string]risks.RiskRule)
	for _, rule := range risks.GetBuiltInRiskRules() {
		builtinRiskRules[rule.Category().Id] = rule
	}
	customRiskRules := LoadCustomRiskRules(config.RiskRulesPlugins, progressReporter)

	modelInput := new(input.Model).Defaults()
	loadError := modelInput.Load(config.InputFile)
	if loadError != nil {
		return nil, fmt.Errorf("unable to load model yaml: %v", loadError)
	}

	parsedModel, parseError := ParseModel(modelInput, builtinRiskRules, customRiskRules)
	if parseError != nil {
		return nil, fmt.Errorf("unable to parse model yaml: %v", parseError)
	}

	// RAA failures are non-fatal: applyRAA warns and returns "" in that case
	introTextRAA := applyRAA(parsedModel, config.BinFolder, config.RAAPlugin, progressReporter)

	applyRiskGeneration(parsedModel, customRiskRules, builtinRiskRules,
		config.SkipRiskRules, progressReporter)
	err := parsedModel.ApplyWildcardRiskTrackingEvaluation(config.IgnoreOrphanedRiskTracking, progressReporter)
	if err != nil {
		return nil, fmt.Errorf("unable to apply wildcard risk tracking evaluation: %v", err)
	}

	err = parsedModel.CheckRiskTracking(config.IgnoreOrphanedRiskTracking, progressReporter)
	if err != nil {
		return nil, fmt.Errorf("unable to check risk tracking: %v", err)
	}

	return &ReadResult{
		ModelInput:       modelInput,
		ParsedModel:      parsedModel,
		IntroTextRAA:     introTextRAA,
		BuiltinRiskRules: builtinRiskRules,
		CustomRiskRules:  customRiskRules,
	}, nil
}
+
+func applyRisk(parsedModel *types.ParsedModel, rule risks.RiskRule, skippedRules *map[string]bool) {
+ id := rule.Category().Id
+ _, ok := (*skippedRules)[id]
+
+ if ok {
+ fmt.Printf("Skipping risk rule %q\n", rule.Category().Id)
+ delete(*skippedRules, rule.Category().Id)
+ } else {
+ parsedModel.AddToListOfSupportedTags(rule.SupportedTags())
+ generatedRisks := rule.GenerateRisks(parsedModel)
+ if generatedRisks != nil {
+ if len(generatedRisks) > 0 {
+ parsedModel.GeneratedRisksByCategory[rule.Category().Id] = generatedRisks
+ }
+ } else {
+ fmt.Printf("Failed to generate risks for %q\n", id)
+ }
+ }
+}
+
// TODO: refactor skipRiskRules to be a string array instead of a comma-separated string

// applyRiskGeneration runs all built-in and custom risk rules against the
// parsed model, honoring the comma-separated skip list, and finally indexes
// all generated risks by their (lowercased) synthetic id.
func applyRiskGeneration(parsedModel *types.ParsedModel, customRiskRules map[string]*CustomRisk,
	builtinRiskRules map[string]risks.RiskRule,
	skipRiskRules string,
	progressReporter progressReporter) {
	progressReporter.Info("Applying risk generation")

	// turn the comma-separated skip list into a consumable set
	skippedRules := make(map[string]bool)
	if len(skipRiskRules) > 0 {
		for _, id := range strings.Split(skipRiskRules, ",") {
			skippedRules[id] = true
		}
	}

	for _, rule := range builtinRiskRules {
		applyRisk(parsedModel, rule, &skippedRules)
	}

	// NOW THE CUSTOM RISK RULES (if any)
	for id, customRule := range customRiskRules {
		_, ok := skippedRules[id]
		if ok {
			progressReporter.Info("Skipping custom risk rule:", id)
			delete(skippedRules, id)
		} else {
			progressReporter.Info("Executing custom risk rule:", id)
			parsedModel.AddToListOfSupportedTags(customRule.Tags)
			customRisks := customRule.GenerateRisks(parsedModel)
			if len(customRisks) > 0 {
				parsedModel.GeneratedRisksByCategory[customRule.Category.Id] = customRisks
			}

			progressReporter.Info("Added custom risks:", len(customRisks))
		}
	}

	// anything still in the set was never matched by a rule id -> report it
	if len(skippedRules) > 0 {
		keys := make([]string, 0)
		for k := range skippedRules {
			keys = append(keys, k)
		}
		if len(keys) > 0 {
			progressReporter.Info("Unknown risk rules to skip:", keys)
		}
	}

	// save also in map keyed by synthetic risk-id
	for _, category := range types.SortedRiskCategories(parsedModel) {
		someRisks := types.SortedRisksOfCategory(parsedModel, category)
		for _, risk := range someRisks {
			parsedModel.GeneratedRisksBySyntheticId[strings.ToLower(risk.SyntheticId)] = risk
		}
	}
}
+
+func applyRAA(parsedModel *types.ParsedModel, binFolder, raaPlugin string, progressReporter progressReporter) string {
+ progressReporter.Info("Applying RAA calculation:", raaPlugin)
+
+ runner, loadError := new(runner).Load(filepath.Join(binFolder, raaPlugin))
+ if loadError != nil {
+ progressReporter.Warn(fmt.Sprintf("WARNING: raa %q not loaded: %v\n", raaPlugin, loadError))
+ return ""
+ }
+
+ runError := runner.Run(parsedModel, parsedModel)
+ if runError != nil {
+ progressReporter.Warn(fmt.Sprintf("WARNING: raa %q not applied: %v\n", raaPlugin, runError))
+ return ""
+ }
+
+ return runner.ErrorOutput
+}
diff --git a/pkg/model/rules.go b/pkg/model/rules.go
new file mode 100644
index 00000000..c50b4050
--- /dev/null
+++ b/pkg/model/rules.go
@@ -0,0 +1,62 @@
+package model
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
// CustomRisk is a risk-rule plugin loaded at runtime: its identity and
// category metadata plus the runner used to execute the plugin binary.
type CustomRisk struct {
	ID       string
	Category types.RiskCategory
	Tags     []string
	Runner   *runner
}
+
// GenerateRisks executes the plugin with "-generate-risks", feeding it the
// parsed model on stdin and decoding the produced risks from stdout.
// A CustomRisk without a runner yields nil.
// NOTE(review): log.Fatalf aborts the whole process on a plugin failure —
// consider returning an error instead; confirm callers before changing.
func (r *CustomRisk) GenerateRisks(m *types.ParsedModel) []types.Risk {
	if r.Runner == nil {
		return nil
	}

	risks := make([]types.Risk, 0)
	runError := r.Runner.Run(m, &risks, "-generate-risks")
	if runError != nil {
		log.Fatalf("Failed to generate risks for custom risk rule %q: %v\n", r.Runner.Filename, runError)
	}

	return risks
}
+
+func LoadCustomRiskRules(pluginFiles []string, reporter progressReporter) map[string]*CustomRisk {
+ customRiskRuleList := make([]string, 0)
+ customRiskRules := make(map[string]*CustomRisk)
+ if len(pluginFiles) > 0 {
+ reporter.Info("Loading custom risk rules:", strings.Join(pluginFiles, ", "))
+
+ for _, pluginFile := range pluginFiles {
+ if len(pluginFile) > 0 {
+ runner, loadError := new(runner).Load(pluginFile)
+ if loadError != nil {
+ reporter.Error(fmt.Sprintf("WARNING: Custom risk rule %q not loaded: %v\n", pluginFile, loadError))
+ }
+
+ risk := new(CustomRisk)
+ runError := runner.Run(nil, &risk, "-get-info")
+ if runError != nil {
+ reporter.Error(fmt.Sprintf("WARNING: Failed to get info for custom risk rule %q: %v\n", pluginFile, runError))
+ }
+
+ risk.Runner = runner
+ customRiskRules[risk.ID] = risk
+ customRiskRuleList = append(customRiskRuleList, risk.ID)
+ reporter.Info("Custom risk rule loaded:", risk.ID)
+ }
+ }
+
+ reporter.Info("Loaded custom risk rules:", strings.Join(customRiskRuleList, ", "))
+ }
+
+ return customRiskRules
+}
diff --git a/pkg/model/runner.go b/pkg/model/runner.go
new file mode 100644
index 00000000..3cd9c211
--- /dev/null
+++ b/pkg/model/runner.go
@@ -0,0 +1,91 @@
+// TODO: consider moving to internal
+package model
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+)
+
// runner executes an external plugin binary: In is JSON-encoded to the
// plugin's stdin, Out receives the JSON decoded from its stdout, and
// ErrorOutput captures stderr from the last Run.
type runner struct {
	Filename    string
	Parameters  []string
	In          any
	Out         any
	ErrorOutput string
}
+
+func (p *runner) Load(filename string) (*runner, error) {
+ *p = runner{
+ Filename: filename,
+ }
+
+ fileInfo, statError := os.Stat(filename)
+ if statError != nil {
+ return p, statError
+ }
+
+ if !fileInfo.Mode().IsRegular() {
+ return p, fmt.Errorf("run %q is not a regular file", filename)
+ }
+
+ return p, nil
+}
+
// Run executes the plugin at p.Filename with the given parameters, writing
// `in` as indented JSON to the plugin's stdin and decoding the plugin's
// stdout JSON into `out`. Stderr is captured into p.ErrorOutput after the
// process exits; on a non-zero exit it is folded into the returned error.
func (p *runner) Run(in any, out any, parameters ...string) error {
	// reset all state except the filename so repeated Runs don't leak
	// previous parameters or outputs
	*p = runner{
		Filename:   p.Filename,
		Parameters: parameters,
		In:         in,
		Out:        out,
	}

	plugin := exec.Command(p.Filename, p.Parameters...) // #nosec G204
	stdin, stdinError := plugin.StdinPipe()
	if stdinError != nil {
		return stdinError
	}
	// safety net for early returns; the pipe is normally closed explicitly below
	defer func() { _ = stdin.Close() }()

	var stdoutBuf bytes.Buffer
	plugin.Stdout = &stdoutBuf

	var stderrBuf bytes.Buffer
	plugin.Stderr = &stderrBuf

	startError := plugin.Start()
	if startError != nil {
		return startError
	}

	inData, inError := json.MarshalIndent(p.In, "", " ")
	if inError != nil {
		return inError
	}

	_, writeError := stdin.Write(inData)
	if writeError != nil {
		return writeError
	}

	// close stdin so the plugin sees EOF and can finish processing
	inCloseError := stdin.Close()
	if inCloseError != nil {
		return inCloseError
	}

	waitError := plugin.Wait()
	p.ErrorOutput = stderrBuf.String()
	if waitError != nil {
		return fmt.Errorf("%v: %v", waitError, p.ErrorOutput)
	}

	stdout := stdoutBuf.Bytes()
	unmarshalError := json.Unmarshal(stdout, &p.Out)
	if unmarshalError != nil {
		return unmarshalError
	}

	return nil
}
diff --git a/pkg/report/colors.go b/pkg/report/colors.go
new file mode 100644
index 00000000..e8f3fdb2
--- /dev/null
+++ b/pkg/report/colors.go
@@ -0,0 +1,472 @@
+package report
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/jung-kurt/gofpdf"
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
// Color palette (hex "#RRGGBB" strings) shared by the PDF report and the
// generated diagrams.
const (
	Amber                = "#AF780E"
	Green                = "#008000"
	Blue                 = "#000080"
	DarkBlue             = "#000060"
	Black                = "#000000"
	Gray                 = "#444444"
	LightGray            = "#666666"
	MiddleLightGray      = "#999999"
	MoreLightGray        = "#D2D2D2"
	VeryLightGray        = "#E5E5E5"
	ExtremeLightGray     = "#F6F6F6"
	Pink                 = "#F987C5"
	LightPink            = "#FFE7EF"
	Red                  = "#CC0000"
	OutOfScopeFancy      = "#D5D7FF"
	CustomDevelopedParts = "#FFFC97"
	ExtremeLightBlue     = "#DDFFFF"
	LightBlue            = "#77FFFF"
	Brown                = "#8C4C17"
)
+
// DarkenHexColor returns the given "#RRGGBB" color darkened by 0x20 per
// channel, flooring each channel at 0x00.
// Malformed input (too short or non-hex digits) is returned unchanged
// instead of panicking, which the previous version did on a short decode.
func DarkenHexColor(hexString string) string {
	if len(hexString) < 7 {
		return hexString
	}
	colorBytes, decodeError := hex.DecodeString(hexString[1:])
	if decodeError != nil || len(colorBytes) < 3 {
		return hexString
	}
	adjusted := make([]byte, 3)
	for i := 0; i < 3; i++ {
		if colorBytes[i] > 0x22 {
			adjusted[i] = colorBytes[i] - 0x20
		} else {
			adjusted[i] = 0x00
		}
	}
	return "#" + hex.EncodeToString(adjusted)
}
+
// BrightenHexColor returns the given "#RRGGBB" color brightened by 0x20 per
// channel, capping each channel at 0xFF.
// Malformed input (too short or non-hex digits) is returned unchanged
// instead of panicking, which the previous version did on a short decode.
func BrightenHexColor(hexString string) string {
	if len(hexString) < 7 {
		return hexString
	}
	colorBytes, decodeError := hex.DecodeString(hexString[1:])
	if decodeError != nil || len(colorBytes) < 3 {
		return hexString
	}
	adjusted := make([]byte, 3)
	for i := 0; i < 3; i++ {
		if colorBytes[i] < 0xDD {
			adjusted[i] = colorBytes[i] + 0x20
		} else {
			adjusted[i] = 0xFF
		}
	}
	return "#" + hex.EncodeToString(adjusted)
}
+
// Severity text colors for the PDF report; each Color* setter has an
// RgbHexColor* twin returning the same color as a hex string for diagrams.

// ColorCriticalRisk sets the PDF text color for critical risks (#FF2600).
func ColorCriticalRisk(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(255, 38, 0)
}
func RgbHexColorCriticalRisk() string {
	return "#FF2600"
}

// ColorHighRisk sets the PDF text color for high risks (#A0281E).
func ColorHighRisk(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(160, 40, 30)
}
func RgbHexColorHighRisk() string {
	return "#A0281E"
}

// ColorElevatedRisk sets the PDF text color for elevated risks (#FF8E00).
func ColorElevatedRisk(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(255, 142, 0)
}
func RgbHexColorElevatedRisk() string {
	return "#FF8E00"
}

// ColorMediumRisk sets the PDF text color for medium risks (#C87832).
func ColorMediumRisk(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(200, 120, 50)
}
func RgbHexColorMediumRisk() string {
	return "#C87832"
}

// ColorLowRisk sets the PDF text color for low risks (#23465F).
func ColorLowRisk(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(35, 70, 95)
}
func RgbHexColorLowRisk() string {
	return "#23465F"
}

// ColorOutOfScope sets the PDF text color for out-of-scope items (#7F7F7F).
func ColorOutOfScope(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(127, 127, 127)
}
func RgbHexColorOutOfScope() string {
	return "#7F7F7F"
}
+
+func ColorRiskStatusUnchecked(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(256, 0, 0)
+}
+func RgbHexColorRiskStatusUnchecked() string {
+ return "#FF0000"
+}
+
+func ColorRiskStatusMitigated(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(0, 143, 0)
+}
+func RgbHexColorRiskStatusMitigated() string {
+ return "#008F00"
+}
+
+func ColorRiskStatusInProgress(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(0, 0, 256)
+}
+func RgbHexColorRiskStatusInProgress() string {
+ return "#0000FF"
+}
+
+func ColorRiskStatusAccepted(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(255, 64, 255)
+}
+func RgbHexColorRiskStatusAccepted() string {
+ return "#FF40FF"
+}
+
+func ColorRiskStatusInDiscussion(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(256, 147, 0)
+}
+func RgbHexColorRiskStatusInDiscussion() string {
+ return "#FF9300"
+}
+
+func ColorRiskStatusFalsePositive(pdf *gofpdf.Fpdf) {
+ pdf.SetTextColor(102, 102, 102)
+}
+func RgbHexColorRiskStatusFalsePositive() string {
+ return "#666666"
+}
+
// Colors for the architectural "function" categories, again with hex twins.

func ColorTwilight(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(58, 82, 200)
}
func RgbHexColorTwilight() string {
	return "#3A52C8"
}

// ColorBusiness marks business-side items (#531B93).
func ColorBusiness(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(83, 27, 147)
}
func RgbHexColorBusiness() string {
	return "#531B93"
}

// ColorArchitecture marks architecture items (#005493).
func ColorArchitecture(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(0, 84, 147)
}
func RgbHexColorArchitecture() string {
	return "#005493"
}

// ColorDevelopment marks development items (#DE9223).
func ColorDevelopment(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(222, 146, 35)
}
func RgbHexColorDevelopment() string {
	return "#DE9223"
}

// ColorOperation marks operations items (#947F50).
func ColorOperation(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(148, 127, 80)
}
func RgbHexColorOperation() string {
	return "#947F50"
}

// ColorModelFailure marks model-failure findings (#945200).
func ColorModelFailure(pdf *gofpdf.Fpdf) {
	pdf.SetTextColor(148, 82, 0)
}
func RgbHexColorModelFailure() string {
	return "#945200"
}
+
+func determineArrowLineStyle(cl types.CommunicationLink) string {
+ if len(cl.DataAssetsSent) == 0 && len(cl.DataAssetsReceived) == 0 {
+ return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery...
+ }
+ if cl.Usage == types.DevOps {
+ return "dashed"
+ }
+ return "solid"
+}
+
+// Pen Widths:
+
+func determineArrowPenWidth(cl types.CommunicationLink, parsedModel *types.ParsedModel) string {
+ if determineArrowColor(cl, parsedModel) == Pink {
+ return fmt.Sprintf("%f", 3.0)
+ }
+ if determineArrowColor(cl, parsedModel) != Black {
+ return fmt.Sprintf("%f", 2.5)
+ }
+ return fmt.Sprintf("%f", 1.5)
+}
+
+func determineLabelColor(cl types.CommunicationLink, parsedModel *types.ParsedModel) string {
+ // TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here
+ /*
+ if dataFlow.Protocol.IsEncrypted() {
+ return Gray
+ } else {*/
+ // check for red
+ for _, sentDataAsset := range cl.DataAssetsSent {
+ if parsedModel.DataAssets[sentDataAsset].Integrity == types.MissionCritical {
+ return Red
+ }
+ }
+ for _, receivedDataAsset := range cl.DataAssetsReceived {
+ if parsedModel.DataAssets[receivedDataAsset].Integrity == types.MissionCritical {
+ return Red
+ }
+ }
+ // check for amber
+ for _, sentDataAsset := range cl.DataAssetsSent {
+ if parsedModel.DataAssets[sentDataAsset].Integrity == types.Critical {
+ return Amber
+ }
+ }
+ for _, receivedDataAsset := range cl.DataAssetsReceived {
+ if parsedModel.DataAssets[receivedDataAsset].Integrity == types.Critical {
+ return Amber
+ }
+ }
+ // default
+ return Gray
+
+}
+
// pink when model forgery attempt (i.e. nothing being sent and received)

// determineArrowColor picks the arrow color of a communication link:
// Pink for links transferring nothing (or unknown protocol), gray/blue/brown
// for DevOps/VPN/IP-filtered links, then Red/Amber by the confidentiality of
// the transferred data assets, defaulting to Black.
func determineArrowColor(cl types.CommunicationLink, parsedModel *types.ParsedModel) string {
	// TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here
	if len(cl.DataAssetsSent) == 0 && len(cl.DataAssetsReceived) == 0 ||
		cl.Protocol == types.UnknownProtocol {
		return Pink // pink, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery...
	}
	if cl.Usage == types.DevOps {
		return MiddleLightGray
	} else if cl.VPN {
		return DarkBlue
	} else if cl.IpFiltered {
		return Brown
	}
	// check for red
	for _, sentDataAsset := range cl.DataAssetsSent {
		if parsedModel.DataAssets[sentDataAsset].Confidentiality == types.StrictlyConfidential {
			return Red
		}
	}
	for _, receivedDataAsset := range cl.DataAssetsReceived {
		if parsedModel.DataAssets[receivedDataAsset].Confidentiality == types.StrictlyConfidential {
			return Red
		}
	}
	// check for amber
	for _, sentDataAsset := range cl.DataAssetsSent {
		if parsedModel.DataAssets[sentDataAsset].Confidentiality == types.Confidential {
			return Amber
		}
	}
	for _, receivedDataAsset := range cl.DataAssetsReceived {
		if parsedModel.DataAssets[receivedDataAsset].Confidentiality == types.Confidential {
			return Amber
		}
	}
	// default
	return Black
	/*
		} else if dataFlow.Authentication != NoneAuthentication {
			return Black
		} else {
			// check for red
			for _, sentDataAsset := range dataFlow.DataAssetsSent { // first check if any red?
				if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == MissionCritical {
					return Red
				}
			}
			for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // first check if any red?
				if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == MissionCritical {
					return Red
				}
			}
			// check for amber
			for _, sentDataAsset := range dataFlow.DataAssetsSent { // then check if any amber?
				if ParsedModelRoot.DataAssets[sentDataAsset].Integrity == Critical {
					return Amber
				}
			}
			for _, receivedDataAsset := range dataFlow.DataAssetsReceived { // then check if any amber?
				if ParsedModelRoot.DataAssets[receivedDataAsset].Integrity == Critical {
					return Amber
				}
			}
			return Black
		}
	*/
}
+
// red when >= confidential data stored in unencrypted technical asset

// determineTechnicalAssetLabelColor picks the label color for a technical
// asset from its own integrity and the integrity of the data it stores or
// processes: Red for mission-critical, Amber for critical, Black otherwise.
func determineTechnicalAssetLabelColor(ta types.TechnicalAsset, model *types.ParsedModel) string {
	// TODO: Just move into main.go and let the generated risk determine the color, don't duplicate the logic here
	// Check for red
	if ta.Integrity == types.MissionCritical {
		return Red
	}
	for _, storedDataAsset := range ta.DataAssetsStored {
		if model.DataAssets[storedDataAsset].Integrity == types.MissionCritical {
			return Red
		}
	}
	for _, processedDataAsset := range ta.DataAssetsProcessed {
		if model.DataAssets[processedDataAsset].Integrity == types.MissionCritical {
			return Red
		}
	}
	// Check for amber
	if ta.Integrity == types.Critical {
		return Amber
	}
	for _, storedDataAsset := range ta.DataAssetsStored {
		if model.DataAssets[storedDataAsset].Integrity == types.Critical {
			return Amber
		}
	}
	for _, processedDataAsset := range ta.DataAssetsProcessed {
		if model.DataAssets[processedDataAsset].Integrity == types.Critical {
			return Amber
		}
	}
	return Black
	/*
		if what.Encrypted {
			return Black
		} else {
			if what.Confidentiality == StrictlyConfidential {
				return Red
			}
			for _, storedDataAsset := range what.DataAssetsStored {
				if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == StrictlyConfidential {
					return Red
				}
			}
			if what.Confidentiality == Confidential {
				return Amber
			}
			for _, storedDataAsset := range what.DataAssetsStored {
				if ParsedModelRoot.DataAssets[storedDataAsset].Confidentiality == Confidential {
					return Amber
				}
			}
			return Black
		}
	*/
}
+
// red when mission-critical integrity, but still unauthenticated (non-readonly) channels access it
// amber when critical integrity, but still unauthenticated (non-readonly) channels access it
// pink when model forgery attempt (i.e. nothing being processed)

// determineShapeBorderColor picks the border color for a technical asset's
// shape from its own confidentiality and the confidentiality of the data it
// processes: Red for strictly-confidential, Amber for confidential, Black
// otherwise.
func determineShapeBorderColor(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string {
	// Check for red
	if ta.Confidentiality == types.StrictlyConfidential {
		return Red
	}
	for _, processedDataAsset := range ta.DataAssetsProcessed {
		if parsedModel.DataAssets[processedDataAsset].Confidentiality == types.StrictlyConfidential {
			return Red
		}
	}
	// Check for amber
	if ta.Confidentiality == types.Confidential {
		return Amber
	}
	for _, processedDataAsset := range ta.DataAssetsProcessed {
		if parsedModel.DataAssets[processedDataAsset].Confidentiality == types.Confidential {
			return Amber
		}
	}
	return Black
	/*
		if what.Integrity == MissionCritical {
			for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] {
				if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication {
					return Red
				}
			}
		}

		if what.Integrity == Critical {
			for _, dataFlow := range IncomingTechnicalCommunicationLinksMappedByTargetId[what.Id] {
				if !dataFlow.Readonly && dataFlow.Authentication == NoneAuthentication {
					return Amber
				}
			}
		}

		if len(what.DataAssetsProcessed) == 0 && len(what.DataAssetsStored) == 0 {
			return Pink // pink, because it's strange when too many technical assets process no data... some are ok, but many in a diagram is a sign of model forgery...
		}

		return Black
	*/
}
+
+// dotted when model forgery attempt (i.e. nothing being processed or stored)
+
+func determineShapeBorderLineStyle(ta types.TechnicalAsset) string {
+ if len(ta.DataAssetsProcessed) == 0 || ta.OutOfScope {
+ return "dotted" // dotted, because it's strange when too many technical communication links transfer no data... some ok, but many in a diagram ist a sign of model forgery...
+ }
+ return "solid"
+}
+
+// 3 when redundant
+
+func determineShapePeripheries(ta types.TechnicalAsset) int {
+ if ta.Redundant {
+ return 2
+ }
+ return 1
+}
+
+func determineShapeStyle(ta types.TechnicalAsset) string {
+ return "filled"
+}
+
// determineShapeFillColor picks the fill color for a technical asset's shape:
// light pink for suspicious assets (no data processed/stored, unknown
// technology, or no communication links at all), special colors for
// internet/out-of-scope/custom-developed assets, then darkened or brightened
// by machine type (physical darker, container/serverless brighter).
func determineShapeFillColor(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string {
	fillColor := VeryLightGray
	if len(ta.DataAssetsProcessed) == 0 && len(ta.DataAssetsStored) == 0 ||
		ta.Technology == types.UnknownTechnology {
		fillColor = LightPink // lightPink, because it's strange when too many technical assets process no data... some ok, but many in a diagram ist a sign of model forgery...
	} else if len(ta.CommunicationLinks) == 0 && len(parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[ta.Id]) == 0 {
		fillColor = LightPink
	} else if ta.Internet {
		fillColor = ExtremeLightBlue
	} else if ta.OutOfScope {
		fillColor = OutOfScopeFancy
	} else if ta.CustomDevelopedParts {
		fillColor = CustomDevelopedParts
	}
	switch ta.Machine {
	case types.Physical:
		fillColor = DarkenHexColor(fillColor)
	case types.Container:
		fillColor = BrightenHexColor(fillColor)
	case types.Serverless:
		fillColor = BrightenHexColor(BrightenHexColor(fillColor))
	case types.Virtual:
	}
	return fillColor
}
+
+func determineShapeBorderPenWidth(ta types.TechnicalAsset, parsedModel *types.ParsedModel) string {
+ if determineShapeBorderColor(ta, parsedModel) == Pink {
+ return fmt.Sprintf("%f", 3.5)
+ }
+ if determineShapeBorderColor(ta, parsedModel) != Black {
+ return fmt.Sprintf("%f", 3.0)
+ }
+ return fmt.Sprintf("%f", 2.0)
+}
diff --git a/report/excel.go b/pkg/report/excel.go
similarity index 73%
rename from report/excel.go
rename to pkg/report/excel.go
index 3159b04b..38bccb94 100644
--- a/report/excel.go
+++ b/pkg/report/excel.go
@@ -1,42 +1,45 @@
package report
import (
- "github.com/threagile/threagile/colors"
- "github.com/threagile/threagile/model"
- "github.com/xuri/excelize/v2"
+ "fmt"
"sort"
"strconv"
"strings"
-)
-var excelRow int
+ "github.com/threagile/threagile/pkg/security/types"
+ "github.com/xuri/excelize/v2"
+)
-func WriteRisksExcelToFile(filename string) {
- excelRow = 0
+func WriteRisksExcelToFile(parsedModel *types.ParsedModel, filename string) error {
+ excelRow := 0
excel := excelize.NewFile()
- sheetName := model.ParsedModelRoot.Title
+ sheetName := parsedModel.Title
err := excel.SetDocProps(&excelize.DocProperties{
Category: "Threat Model Risks Summary",
ContentStatus: "Final",
- Creator: model.ParsedModelRoot.Author.Name,
+ Creator: parsedModel.Author.Name,
Description: sheetName + " via Threagile",
Identifier: "xlsx",
Keywords: "Threat Model",
- LastModifiedBy: model.ParsedModelRoot.Author.Name,
+ LastModifiedBy: parsedModel.Author.Name,
Revision: "0",
Subject: sheetName,
Title: sheetName,
Language: "en-US",
Version: "1.0.0",
})
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set doc properties: %w", err)
+ }
sheetIndex, _ := excel.NewSheet(sheetName)
_ = excel.DeleteSheet("Sheet1")
orientation := "landscape"
size := 9
err = excel.SetPageLayout(sheetName, &excelize.PageLayoutOptions{Orientation: &orientation, Size: &size}) // A4
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set page layout: %w", err)
+ }
err = excel.SetHeaderFooter(sheetName, &excelize.HeaderFooterOptions{
DifferentFirst: false,
@@ -45,9 +48,11 @@ func WriteRisksExcelToFile(filename string) {
OddFooter: "&C&F",
EvenHeader: "&L&P",
EvenFooter: "&L&D&R&T",
- FirstHeader: `&Threat Model &"-,` + model.ParsedModelRoot.Title + `"Bold&"-,Regular"Risks Summary+000A&D`,
+ FirstHeader: `&Threat Model &"-,` + parsedModel.Title + `"Bold&"-,Regular"Risks Summary+000A&D`,
})
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set header/footer: %w", err)
+ }
err = excel.SetCellValue(sheetName, "A1", "Severity")
err = excel.SetCellValue(sheetName, "B1", "Likelihood")
@@ -90,17 +95,19 @@ func WriteRisksExcelToFile(filename string) {
err = excel.SetColWidth(sheetName, "R", "R", 18)
err = excel.SetColWidth(sheetName, "S", "S", 20)
err = excel.SetColWidth(sheetName, "T", "T", 20)
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set column width: %w", err)
+ }
- // styleSeverityCriticalBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorCriticalRisk() + `","size":12,"bold":true}}`)
+ // styleSeverityCriticalBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorCriticalRisk() + `","size":12,"bold":true}}`)
styleSeverityCriticalBold, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorCriticalRisk(),
+ Color: RgbHexColorCriticalRisk(),
Size: 12,
Bold: true,
},
})
- // styleSeverityCriticalCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorCriticalRisk() + `","size":12}}`)
+ // styleSeverityCriticalCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorCriticalRisk() + `","size":12}}`)
styleSeverityCriticalCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -108,19 +115,19 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorCriticalRisk(),
+ Color: RgbHexColorCriticalRisk(),
Size: 12,
},
})
- // styleSeverityHighBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorHighRisk() + `","size":12,"bold":true}}`)
+ // styleSeverityHighBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorHighRisk() + `","size":12,"bold":true}}`)
styleSeverityHighBold, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorHighRisk(),
+ Color: RgbHexColorHighRisk(),
Size: 12,
Bold: true,
},
})
- // styleSeverityHighCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorHighRisk() + `","size":12}}`)
+ // styleSeverityHighCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorHighRisk() + `","size":12}}`)
styleSeverityHighCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -128,19 +135,19 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorHighRisk(),
+ Color: RgbHexColorHighRisk(),
Size: 12,
},
})
- // styleSeverityElevatedBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorElevatedRisk() + `","size":12,"bold":true}}`)
+ // styleSeverityElevatedBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorElevatedRisk() + `","size":12,"bold":true}}`)
styleSeverityElevatedBold, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorElevatedRisk(),
+ Color: RgbHexColorElevatedRisk(),
Size: 12,
Bold: true,
},
})
- // styleSeverityElevatedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorElevatedRisk() + `","size":12}}`)
+ // styleSeverityElevatedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorElevatedRisk() + `","size":12}}`)
styleSeverityElevatedCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -148,19 +155,19 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorElevatedRisk(),
+ Color: RgbHexColorElevatedRisk(),
Size: 12,
},
})
- // styleSeverityMediumBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorMediumRisk() + `","size":12,"bold":true}}`)
+ // styleSeverityMediumBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorMediumRisk() + `","size":12,"bold":true}}`)
styleSeverityMediumBold, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorMediumRisk(),
+ Color: RgbHexColorMediumRisk(),
Size: 12,
Bold: true,
},
})
- // styleSeverityMediumCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorMediumRisk() + `","size":12}}`)
+ // styleSeverityMediumCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorMediumRisk() + `","size":12}}`)
styleSeverityMediumCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -168,19 +175,19 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorMediumRisk(),
+ Color: RgbHexColorMediumRisk(),
Size: 12,
},
})
- // styleSeverityLowBold, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorLowRisk() + `","size":12,"bold":true}}`)
+ // styleSeverityLowBold, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorLowRisk() + `","size":12,"bold":true}}`)
styleSeverityLowBold, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorLowRisk(),
+ Color: RgbHexColorLowRisk(),
Size: 12,
Bold: true,
},
})
- // styleSeverityLowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorLowRisk() + `","size":12}}`)
+ // styleSeverityLowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorLowRisk() + `","size":12}}`)
styleSeverityLowCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -188,12 +195,12 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorLowRisk(),
+ Color: RgbHexColorLowRisk(),
Size: 12,
},
})
- // styleRedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorRiskStatusUnchecked() + `","size":12}}`)
+ // styleRedCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorRiskStatusUnchecked() + `","size":12}}`)
styleRedCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -201,11 +208,11 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorLowRisk(),
+ Color: RgbHexColorLowRisk(),
Size: 12,
},
})
- // styleGreenCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + colors.RgbHexColorRiskStatusMitigated() + `","size":12}}`)
+ // styleGreenCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"` + RgbHexColorRiskStatusMitigated() + `","size":12}}`)
styleGreenCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -213,11 +220,11 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusMitigated(),
+ Color: RgbHexColorRiskStatusMitigated(),
Size: 12,
},
})
- // styleBlueCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusInProgress() + `","size":12}}`)
+ // styleBlueCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusInProgress() + `","size":12}}`)
styleBlueCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -225,11 +232,11 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusInProgress(),
+ Color: RgbHexColorRiskStatusInProgress(),
Size: 12,
},
})
- // styleYellowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusAccepted() + `","size":12}}`)
+ // styleYellowCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusAccepted() + `","size":12}}`)
styleYellowCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -237,11 +244,11 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusAccepted(),
+ Color: RgbHexColorRiskStatusAccepted(),
Size: 12,
},
})
- // styleOrangeCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusInDiscussion() + `","size":12}}`)
+ // styleOrangeCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusInDiscussion() + `","size":12}}`)
styleOrangeCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -249,11 +256,11 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusInDiscussion(),
+ Color: RgbHexColorRiskStatusInDiscussion(),
Size: 12,
},
})
- // styleGrayCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + colors.RgbHexColorRiskStatusFalsePositive() + `","size":12}}`)
+ // styleGrayCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#` + RgbHexColorRiskStatusFalsePositive() + `","size":12}}`)
styleGrayCenter, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
Horizontal: "center",
@@ -261,7 +268,7 @@ func WriteRisksExcelToFile(filename string) {
WrapText: false,
},
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusFalsePositive(),
+ Color: RgbHexColorRiskStatusFalsePositive(),
Size: 12,
},
})
@@ -308,10 +315,10 @@ func WriteRisksExcelToFile(filename string) {
Size: 10,
},
})
- // styleGraySmall, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorOutOfScope() + `","size":10}}`)
+ // styleGraySmall, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorOutOfScope() + `","size":10}}`)
styleGraySmall, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorOutOfScope(),
+ Color: RgbHexColorOutOfScope(),
Size: 10,
},
})
@@ -328,41 +335,41 @@ func WriteRisksExcelToFile(filename string) {
Bold: true,
},
})
- // styleMitigation, err := excel.NewStyle(`{"font":{"color":"` + colors.RgbHexColorRiskStatusMitigated() + `","size":10}}`)
+ // styleMitigation, err := excel.NewStyle(`{"font":{"color":"` + RgbHexColorRiskStatusMitigated() + `","size":10}}`)
styleMitigation, err := excel.NewStyle(&excelize.Style{
Font: &excelize.Font{
- Color: colors.RgbHexColorRiskStatusMitigated(),
+ Color: RgbHexColorRiskStatusMitigated(),
Size: 10,
},
})
excelRow++ // as we have a header line
- for _, category := range model.SortedRiskCategories() {
- risks := model.SortedRisksOfCategory(category)
+ for _, category := range types.SortedRiskCategories(parsedModel) {
+ risks := types.SortedRisksOfCategory(parsedModel, category)
for _, risk := range risks {
excelRow++
- techAsset := model.ParsedModelRoot.TechnicalAssets[risk.MostRelevantTechnicalAssetId]
- commLink := model.CommunicationLinks[risk.MostRelevantCommunicationLinkId]
- riskTrackingStatus := risk.GetRiskTrackingStatusDefaultingUnchecked()
+ techAsset := parsedModel.TechnicalAssets[risk.MostRelevantTechnicalAssetId]
+ commLink := parsedModel.CommunicationLinks[risk.MostRelevantCommunicationLinkId]
+ riskTrackingStatus := risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
// content
err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(excelRow), risk.Severity.Title())
err = excel.SetCellValue(sheetName, "B"+strconv.Itoa(excelRow), risk.ExploitationLikelihood.Title())
err = excel.SetCellValue(sheetName, "C"+strconv.Itoa(excelRow), risk.ExploitationImpact.Title())
- err = excel.SetCellValue(sheetName, "D"+strconv.Itoa(excelRow), risk.Category.STRIDE.Title())
- err = excel.SetCellValue(sheetName, "E"+strconv.Itoa(excelRow), risk.Category.Function.Title())
- err = excel.SetCellValue(sheetName, "F"+strconv.Itoa(excelRow), "CWE-"+strconv.Itoa(risk.Category.CWE))
- err = excel.SetCellValue(sheetName, "G"+strconv.Itoa(excelRow), risk.Category.Title)
+ err = excel.SetCellValue(sheetName, "D"+strconv.Itoa(excelRow), category.STRIDE.Title())
+ err = excel.SetCellValue(sheetName, "E"+strconv.Itoa(excelRow), category.Function.Title())
+ err = excel.SetCellValue(sheetName, "F"+strconv.Itoa(excelRow), "CWE-"+strconv.Itoa(category.CWE))
+ err = excel.SetCellValue(sheetName, "G"+strconv.Itoa(excelRow), category.Title)
err = excel.SetCellValue(sheetName, "H"+strconv.Itoa(excelRow), techAsset.Title)
err = excel.SetCellValue(sheetName, "I"+strconv.Itoa(excelRow), commLink.Title)
err = excel.SetCellFloat(sheetName, "J"+strconv.Itoa(excelRow), techAsset.RAA, 0, 32)
err = excel.SetCellValue(sheetName, "K"+strconv.Itoa(excelRow), removeFormattingTags(risk.Title))
- err = excel.SetCellValue(sheetName, "L"+strconv.Itoa(excelRow), risk.Category.Action)
- err = excel.SetCellValue(sheetName, "M"+strconv.Itoa(excelRow), risk.Category.Mitigation)
- err = excel.SetCellValue(sheetName, "N"+strconv.Itoa(excelRow), risk.Category.Check)
+ err = excel.SetCellValue(sheetName, "L"+strconv.Itoa(excelRow), category.Action)
+ err = excel.SetCellValue(sheetName, "M"+strconv.Itoa(excelRow), category.Mitigation)
+ err = excel.SetCellValue(sheetName, "N"+strconv.Itoa(excelRow), category.Check)
err = excel.SetCellValue(sheetName, "O"+strconv.Itoa(excelRow), risk.SyntheticId)
err = excel.SetCellValue(sheetName, "P"+strconv.Itoa(excelRow), riskTrackingStatus.Title())
- if riskTrackingStatus != model.Unchecked {
- riskTracking := risk.GetRiskTracking()
+ if riskTrackingStatus != types.Unchecked {
+ riskTracking := risk.GetRiskTracking(parsedModel)
err = excel.SetCellValue(sheetName, "Q"+strconv.Itoa(excelRow), riskTracking.Justification)
if !riskTracking.Date.IsZero() {
err = excel.SetCellValue(sheetName, "R"+strconv.Itoa(excelRow), riskTracking.Date.Format("2006-01-02"))
@@ -373,19 +380,19 @@ func WriteRisksExcelToFile(filename string) {
// styles
if riskTrackingStatus.IsStillAtRisk() {
switch risk.Severity {
- case model.CriticalSeverity:
+ case types.CriticalSeverity:
err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityCriticalCenter)
err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityCriticalBold)
- case model.HighSeverity:
+ case types.HighSeverity:
err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityHighCenter)
err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityHighBold)
- case model.ElevatedSeverity:
+ case types.ElevatedSeverity:
err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityElevatedCenter)
err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityElevatedBold)
- case model.MediumSeverity:
+ case types.MediumSeverity:
err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityMediumCenter)
err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityMediumBold)
- case model.LowSeverity:
+ case types.LowSeverity:
err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "F"+strconv.Itoa(excelRow), styleSeverityLowCenter)
err = excel.SetCellStyle(sheetName, "G"+strconv.Itoa(excelRow), "I"+strconv.Itoa(excelRow), styleSeverityLowBold)
}
@@ -395,17 +402,17 @@ func WriteRisksExcelToFile(filename string) {
}
styleFromRiskTracking := styleBlackCenter
switch riskTrackingStatus {
- case model.Unchecked:
+ case types.Unchecked:
styleFromRiskTracking = styleRedCenter
- case model.Mitigated:
+ case types.Mitigated:
styleFromRiskTracking = styleGreenCenter
- case model.InProgress:
+ case types.InProgress:
styleFromRiskTracking = styleBlueCenter
- case model.Accepted:
+ case types.Accepted:
styleFromRiskTracking = styleYellowCenter
- case model.InDiscussion:
+ case types.InDiscussion:
styleFromRiskTracking = styleOrangeCenter
- case model.FalsePositive:
+ case types.FalsePositive:
styleFromRiskTracking = styleGrayCenter
default:
styleFromRiskTracking = styleBlackCenter
@@ -421,7 +428,9 @@ func WriteRisksExcelToFile(filename string) {
err = excel.SetCellStyle(sheetName, "R"+strconv.Itoa(excelRow), "R"+strconv.Itoa(excelRow), styleBlackCenter)
err = excel.SetCellStyle(sheetName, "S"+strconv.Itoa(excelRow), "S"+strconv.Itoa(excelRow), styleBlackCenter)
err = excel.SetCellStyle(sheetName, "T"+strconv.Itoa(excelRow), "T"+strconv.Itoa(excelRow), styleBlackLeft)
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set cell style: %w", err)
+ }
}
}
@@ -447,39 +456,48 @@ func WriteRisksExcelToFile(filename string) {
})
err = excel.SetCellStyle(sheetName, "A1", "T1", styleHeadCenter)
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set cell style: %w", err)
+ }
excel.SetActiveSheet(sheetIndex)
err = excel.SaveAs(filename)
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to save excel file: %w", err)
+ }
+ return nil
}
-func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sortedTagsAvailable) == 0 is: write a hint in the execel that no tags are used
- excelRow = 0
+func WriteTagsExcelToFile(parsedModel *types.ParsedModel, filename string) error { // TODO: eventually when len(sortedTagsAvailable) == 0 is: write a hint in the Excel that no tags are used
+ excelRow := 0
excel := excelize.NewFile()
- sheetName := model.ParsedModelRoot.Title
+ sheetName := parsedModel.Title
err := excel.SetDocProps(&excelize.DocProperties{
Category: "Tag Matrix",
ContentStatus: "Final",
- Creator: model.ParsedModelRoot.Author.Name,
+ Creator: parsedModel.Author.Name,
Description: sheetName + " via Threagile",
Identifier: "xlsx",
Keywords: "Tag Matrix",
- LastModifiedBy: model.ParsedModelRoot.Author.Name,
+ LastModifiedBy: parsedModel.Author.Name,
Revision: "0",
Subject: sheetName,
Title: sheetName,
Language: "en-US",
Version: "1.0.0",
})
- checkErr(err)
+ if err != nil {
+ return err
+ }
sheetIndex, _ := excel.NewSheet(sheetName)
_ = excel.DeleteSheet("Sheet1")
orientation := "landscape"
size := 9
err = excel.SetPageLayout(sheetName, &excelize.PageLayoutOptions{Orientation: &orientation, Size: &size}) // A4
- checkErr(err)
+ if err != nil {
+ return err
+ }
err = excel.SetHeaderFooter(sheetName, &excelize.HeaderFooterOptions{
DifferentFirst: false,
@@ -488,12 +506,14 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted
OddFooter: "&C&F",
EvenHeader: "&L&P",
EvenFooter: "&L&D&R&T",
- FirstHeader: `&Tag Matrix &"-,` + model.ParsedModelRoot.Title + `"Bold&"-,Regular"Summary+000A&D`,
+ FirstHeader: `&Tag Matrix &"-,` + parsedModel.Title + `"Bold&"-,Regular"Summary+000A&D`,
})
- checkErr(err)
+ if err != nil {
+ return err
+ }
- err = excel.SetCellValue(sheetName, "A1", "Element") // TODO is "Element" the correct generic name when referencing assets, links, trust boudaries etc.? Eventually add separate column "type of element" like "technical asset" or "data asset"?
- sortedTagsAvailable := model.TagsActuallyUsed()
+ err = excel.SetCellValue(sheetName, "A1", "Element") // TODO is "Element" the correct generic name when referencing assets, links, trust boundaries etc.? Eventually add separate column "type of element" like "technical asset" or "data asset"?
+ sortedTagsAvailable := parsedModel.TagsActuallyUsed()
sort.Strings(sortedTagsAvailable)
axis := ""
for i, tag := range sortedTagsAvailable {
@@ -502,10 +522,16 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted
}
err = excel.SetColWidth(sheetName, "A", "A", 60)
+ if err != nil {
+ return err
+ }
+
if len(sortedTagsAvailable) > 0 {
err = excel.SetColWidth(sheetName, "B", axis, 35)
}
- checkErr(err)
+ if err != nil {
+ return err
+ }
// styleBlackCenter, err := excel.NewStyle(`{"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false},"font":{"color":"#000000","size":12}}`)
styleBlackCenter, err := excel.NewStyle(&excelize.Style{
@@ -535,20 +561,35 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted
excelRow++ // as we have a header line
if len(sortedTagsAvailable) > 0 {
- for _, techAsset := range model.SortedTechnicalAssetsByTitle() {
- writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, techAsset.Title, techAsset.Tags)
+ for _, techAsset := range sortedTechnicalAssetsByTitle(parsedModel) {
+ err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, techAsset.Title, techAsset.Tags)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
for _, commLink := range techAsset.CommunicationLinksSorted() {
- writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, commLink.Title, commLink.Tags)
+ err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, commLink.Title, commLink.Tags)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
}
}
- for _, dataAsset := range model.SortedDataAssetsByTitle() {
- writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, dataAsset.Title, dataAsset.Tags)
+ for _, dataAsset := range sortedDataAssetsByTitle(parsedModel) {
+ err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, dataAsset.Title, dataAsset.Tags)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
}
- for _, trustBoundary := range model.SortedTrustBoundariesByTitle() {
- writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, trustBoundary.Title, trustBoundary.Tags)
+ for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) {
+ err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, trustBoundary.Title, trustBoundary.Tags)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
}
- for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() {
- writeRow(excel, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, sharedRuntime.Title, sharedRuntime.Tags)
+ for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) {
+ err := writeRow(excel, &excelRow, sheetName, axis, styleBlackLeftBold, styleBlackCenter, sortedTagsAvailable, sharedRuntime.Title, sharedRuntime.Tags)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
}
}
@@ -569,6 +610,9 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted
Pattern: 1,
},
})
+ if err != nil {
+ return fmt.Errorf("unable to set cell style: %w", err)
+ }
// styleHeadCenterBold, err := excel.NewStyle(`{"font":{"bold":true,"italic":false,"size":14,"color":"#000000"},"fill":{"type":"pattern","color":["#eeeeee"],"pattern":1},"alignment":{"horizontal":"center","shrink_to_fit":true,"wrap_text":false}}`)
styleHeadCenterBold, err := excel.NewStyle(&excelize.Style{
Alignment: &excelize.Alignment{
@@ -591,31 +635,63 @@ func WriteTagsExcelToFile(filename string) { // TODO: eventually when len(sorted
if len(sortedTagsAvailable) > 0 {
err = excel.SetCellStyle(sheetName, "B1", axis+"1", styleHeadCenter)
}
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to set cell style: %w", err)
+ }
excel.SetActiveSheet(sheetIndex)
err = excel.SaveAs(filename)
- checkErr(err)
+ if err != nil {
+ return fmt.Errorf("unable to save excel file: %w", err)
+ }
+ return nil
}
-func writeRow(excel *excelize.File, sheetName string, axis string, styleBlackLeftBold int, styleBlackCenter int,
- sortedTags []string, assetTitle string, tagsUsed []string) {
- excelRow++
- err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(excelRow), assetTitle)
+// sortedTrustBoundariesByTitle returns all trust boundaries of the parsed
+// model sorted by title, giving the tag matrix a deterministic row order
+// (map iteration order in Go is randomized).
+func sortedTrustBoundariesByTitle(parsedModel *types.ParsedModel) []types.TrustBoundary {
+ // pre-size to the known number of boundaries to avoid append regrowth
+ boundaries := make([]types.TrustBoundary, 0, len(parsedModel.TrustBoundaries))
+ for _, boundary := range parsedModel.TrustBoundaries {
+ boundaries = append(boundaries, boundary)
+ }
+ sort.Sort(types.ByTrustBoundaryTitleSort(boundaries))
+ return boundaries
+}
+
+// sortedDataAssetsByTitle returns all data assets of the parsed model sorted
+// by title, giving the tag matrix a deterministic row order (map iteration
+// order in Go is randomized).
+func sortedDataAssetsByTitle(parsedModel *types.ParsedModel) []types.DataAsset {
+ // pre-size to the known number of assets to avoid append regrowth
+ assets := make([]types.DataAsset, 0, len(parsedModel.DataAssets))
+ for _, asset := range parsedModel.DataAssets {
+ assets = append(assets, asset)
+ }
+ sort.Sort(types.ByDataAssetTitleSort(assets))
+ return assets
+}
+
+func writeRow(excel *excelize.File, excelRow *int, sheetName string, axis string, styleBlackLeftBold int, styleBlackCenter int,
+ sortedTags []string, assetTitle string, tagsUsed []string) error {
+ *excelRow++
+ err := excel.SetCellValue(sheetName, "A"+strconv.Itoa(*excelRow), assetTitle)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
for i, tag := range sortedTags {
- if model.Contains(tagsUsed, tag) {
- err = excel.SetCellValue(sheetName, determineColumnLetter(i)+strconv.Itoa(excelRow), "X")
+ if contains(tagsUsed, tag) {
+ err = excel.SetCellValue(sheetName, determineColumnLetter(i)+strconv.Itoa(*excelRow), "X")
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
}
}
- err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(excelRow), "A"+strconv.Itoa(excelRow), styleBlackLeftBold)
- err = excel.SetCellStyle(sheetName, "B"+strconv.Itoa(excelRow), axis+strconv.Itoa(excelRow), styleBlackCenter)
- checkErr(err)
+ err = excel.SetCellStyle(sheetName, "A"+strconv.Itoa(*excelRow), "A"+strconv.Itoa(*excelRow), styleBlackLeftBold)
+ err = excel.SetCellStyle(sheetName, "B"+strconv.Itoa(*excelRow), axis+strconv.Itoa(*excelRow), styleBlackCenter)
+ if err != nil {
+ return fmt.Errorf("unable to write row: %w", err)
+ }
+ return nil
}
var alphabet = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"}
func determineColumnLetter(i int) string {
- // can only have 700 columns in excel that way, but that should be more than usable anyway ;)... otherwise think about your model...
+ // can only have 700 columns in Excel that way, but that should be more than usable anyway ;)... otherwise think about your model...
i++
if i < 26 {
return alphabet[i]
diff --git a/pkg/report/generate.go b/pkg/report/generate.go
new file mode 100644
index 00000000..d1c9ef44
--- /dev/null
+++ b/pkg/report/generate.go
@@ -0,0 +1,192 @@
+package report
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/threagile/threagile/pkg/common"
+ "github.com/threagile/threagile/pkg/model"
+)
+
// GenerateCommands selects which report artifacts the Generate step produces.
// The zero value produces nothing; call Defaults to enable everything.
type GenerateCommands struct {
	DataFlowDiagram     bool
	DataAssetDiagram    bool
	RisksJSON           bool
	TechnicalAssetsJSON bool
	StatsJSON           bool
	RisksExcel          bool
	TagsExcel           bool
	ReportPDF           bool
}

// Defaults enables every output artifact and returns the receiver so the
// call can be chained (e.g. new(GenerateCommands).Defaults()).
func (c *GenerateCommands) Defaults() *GenerateCommands {
	c.DataFlowDiagram = true
	c.DataAssetDiagram = true
	c.RisksJSON = true
	c.TechnicalAssetsJSON = true
	c.StatsJSON = true
	c.RisksExcel = true
	c.TagsExcel = true
	c.ReportPDF = true
	return c
}
+
+func Generate(config *common.Config, readResult *model.ReadResult, commands *GenerateCommands, progressReporter progressReporter) error {
+ generateDataFlowDiagram := commands.DataFlowDiagram
+ generateDataAssetsDiagram := commands.DataAssetDiagram
+ if commands.ReportPDF { // as the PDF report includes both diagrams
+ generateDataFlowDiagram = true
+ generateDataAssetsDiagram = true
+ }
+
+ diagramDPI := config.DiagramDPI
+ if diagramDPI < common.MinGraphvizDPI {
+ diagramDPI = common.MinGraphvizDPI
+ } else if diagramDPI > common.MaxGraphvizDPI {
+ diagramDPI = common.MaxGraphvizDPI
+ }
+ // Data-flow Diagram rendering
+ if generateDataFlowDiagram {
+ gvFile := filepath.Join(config.OutputFolder, config.DataFlowDiagramFilenameDOT)
+ if !config.KeepDiagramSourceFiles {
+ tmpFileGV, err := os.CreateTemp(config.TempFolder, config.DataFlowDiagramFilenameDOT)
+ if err != nil {
+ return err
+ }
+ gvFile = tmpFileGV.Name()
+ defer func() { _ = os.Remove(gvFile) }()
+ }
+ dotFile, err := WriteDataFlowDiagramGraphvizDOT(readResult.ParsedModel, gvFile, diagramDPI, config.AddModelTitle, progressReporter)
+ if err != nil {
+ return fmt.Errorf("error while generating data flow diagram: %s", err)
+ }
+
+ err = GenerateDataFlowDiagramGraphvizImage(dotFile, config.OutputFolder,
+ config.TempFolder, config.BinFolder, config.DataFlowDiagramFilenamePNG, progressReporter)
+ if err != nil {
+ progressReporter.Warn(err)
+ }
+ }
+ // Data Asset Diagram rendering
+ if generateDataAssetsDiagram {
+ gvFile := filepath.Join(config.OutputFolder, config.DataAssetDiagramFilenameDOT)
+ if !config.KeepDiagramSourceFiles {
+ tmpFile, err := os.CreateTemp(config.TempFolder, config.DataAssetDiagramFilenameDOT)
+ if err != nil {
+ return err
+ }
+ gvFile = tmpFile.Name()
+ defer func() { _ = os.Remove(gvFile) }()
+ }
+ dotFile, err := WriteDataAssetDiagramGraphvizDOT(readResult.ParsedModel, gvFile, diagramDPI, progressReporter)
+ if err != nil {
+ return fmt.Errorf("error while generating data asset diagram: %s", err)
+ }
+ err = GenerateDataAssetDiagramGraphvizImage(dotFile, config.OutputFolder,
+ config.TempFolder, config.BinFolder, config.DataAssetDiagramFilenamePNG, progressReporter)
+ if err != nil {
+ progressReporter.Warn(err)
+ }
+ }
+
+ // risks as risks json
+ if commands.RisksJSON {
+ progressReporter.Info("Writing risks json")
+ err := WriteRisksJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonRisksFilename))
+ if err != nil {
+ return fmt.Errorf("error while writing risks json: %s", err)
+ }
+ }
+
+ // technical assets json
+ if commands.TechnicalAssetsJSON {
+ progressReporter.Info("Writing technical assets json")
+ err := WriteTechnicalAssetsJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonTechnicalAssetsFilename))
+ if err != nil {
+ return fmt.Errorf("error while writing technical assets json: %s", err)
+ }
+ }
+
+ // risks as risks json
+ if commands.StatsJSON {
+ progressReporter.Info("Writing stats json")
+ err := WriteStatsJSON(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.JsonStatsFilename))
+ if err != nil {
+ return fmt.Errorf("error while writing stats json: %s", err)
+ }
+ }
+
+ // risks Excel
+ if commands.RisksExcel {
+ progressReporter.Info("Writing risks excel")
+ err := WriteRisksExcelToFile(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.ExcelRisksFilename))
+ if err != nil {
+ return err
+ }
+ }
+
+ // tags Excel
+ if commands.TagsExcel {
+ progressReporter.Info("Writing tags excel")
+ err := WriteTagsExcelToFile(readResult.ParsedModel, filepath.Join(config.OutputFolder, config.ExcelTagsFilename))
+ if err != nil {
+ return err
+ }
+ }
+
+ if commands.ReportPDF {
+ // hash the YAML input file
+ f, err := os.Open(config.InputFile)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = f.Close() }()
+ hasher := sha256.New()
+ if _, err := io.Copy(hasher, f); err != nil {
+ return err
+ }
+ modelHash := hex.EncodeToString(hasher.Sum(nil))
+ // report PDF
+ progressReporter.Info("Writing report pdf")
+
+ pdfReporter := pdfReporter{}
+ err = pdfReporter.WriteReportPDF(filepath.Join(config.OutputFolder, config.ReportFilename),
+ filepath.Join(config.AppFolder, config.TemplateFilename),
+ filepath.Join(config.OutputFolder, config.DataFlowDiagramFilenamePNG),
+ filepath.Join(config.OutputFolder, config.DataAssetDiagramFilenamePNG),
+ config.InputFile,
+ config.SkipRiskRules,
+ config.BuildTimestamp,
+ modelHash,
+ readResult.IntroTextRAA,
+ readResult.CustomRiskRules,
+ config.TempFolder,
+ readResult.ParsedModel)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// progressReporter is the minimal logging surface the report generation code
// needs to emit progress, warning and error messages while artifacts are
// produced; any logger exposing these three variadic methods satisfies it.
type progressReporter interface {
	Info(a ...any)
	Warn(a ...any)
	Error(a ...any)
}
+
// contains reports whether x occurs in a (nil-safe linear scan).
func contains(a []string, x string) bool {
	for i := range a {
		if a[i] == x {
			return true
		}
	}
	return false
}
diff --git a/pkg/report/graphviz.go b/pkg/report/graphviz.go
new file mode 100644
index 00000000..7ffc2d18
--- /dev/null
+++ b/pkg/report/graphviz.go
@@ -0,0 +1,603 @@
+package report
+
+import (
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
// WriteDataFlowDiagramGraphvizDOT writes the model's data-flow diagram as a
// Graphviz DOT file to diagramFilenameDOT and returns the written file handle
// for the subsequent PNG rendering step. dpi sets the render resolution and
// addModelTitle embeds the model title as a graph label. Layout behavior can
// be influenced through the model's diagram-tweak fields.
func WriteDataFlowDiagramGraphvizDOT(parsedModel *types.ParsedModel,
	diagramFilenameDOT string, dpi int, addModelTitle bool,
	progressReporter progressReporter) (*os.File, error) {
	progressReporter.Info("Writing data flow diagram input")

	var dotContent strings.Builder
	dotContent.WriteString("digraph generatedModel { concentrate=false \n")

	// Metadata init ===============================================================================
	tweaks := ""
	if parsedModel.DiagramTweakNodesep > 0 {
		tweaks += "\n nodesep=\"" + strconv.Itoa(parsedModel.DiagramTweakNodesep) + "\""
	}
	if parsedModel.DiagramTweakRanksep > 0 {
		tweaks += "\n ranksep=\"" + strconv.Itoa(parsedModel.DiagramTweakRanksep) + "\""
	}
	suppressBidirectionalArrows := true
	drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks := true
	splines := "ortho"
	// edge layout tweak: each non-ortho layout disables the invisible
	// rank-separating "space lines" (they only help with ortho routing)
	if len(parsedModel.DiagramTweakEdgeLayout) > 0 {
		switch parsedModel.DiagramTweakEdgeLayout {
		case "spline":
			splines = "spline"
			drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false
		case "polyline":
			splines = "polyline"
			drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false
		case "ortho":
			splines = "ortho"
			suppressBidirectionalArrows = true
		case "curved":
			splines = "curved"
			drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false
		case "false":
			splines = "false"
			drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks = false
		default:
			// NOTE(review): the message names diagram_tweak_suppress_edge_labels but the
			// value checked is DiagramTweakEdgeLayout — looks like a copy-paste; verify.
			return nil, fmt.Errorf("unknown value for diagram_tweak_suppress_edge_labels (spline, polyline, ortho, curved, false): %s", parsedModel.DiagramTweakEdgeLayout)
		}
	}
	rankdir := "TB"
	if parsedModel.DiagramTweakLayoutLeftToRight {
		rankdir = "LR"
	}
	modelTitle := ""
	if addModelTitle {
		modelTitle = `label="` + parsedModel.Title + `"`
	}
	dotContent.WriteString(` graph [ ` + modelTitle + `
 labelloc=t
 fontname="Verdana"
 fontsize=40
 outputorder="nodesfirst"
 dpi=` + strconv.Itoa(dpi) + `
 splines=` + splines + `
 rankdir="` + rankdir + `"
` + tweaks + `
 ];
 node [
 fontname="Verdana"
 fontsize="20"
 ];
 edge [
 shape="none"
 fontname="Verdana"
 fontsize="18"
 ];
`)

	// Trust Boundaries ===============================================================================
	var subgraphSnippetsById = make(map[string]string)
	// first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order
	// range over them in sorted (hence re-producible) way:
	keys := make([]string, 0)
	for k := range parsedModel.TrustBoundaries {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		trustBoundary := parsedModel.TrustBoundaries[key]
		var snippet strings.Builder
		if len(trustBoundary.TechnicalAssetsInside) > 0 || len(trustBoundary.TrustBoundariesNested) > 0 {
			if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks {
				// see https://stackoverflow.com/questions/17247455/how-do-i-add-extra-space-between-clusters?noredirect=1&lq=1
				snippet.WriteString("\n subgraph cluster_space_boundary_for_layout_only_1" + hash(trustBoundary.Id) + " {\n")
				snippet.WriteString(` graph [
 dpi=` + strconv.Itoa(dpi) + `
 label=<>
 fontsize="21"
 style="invis"
 color="green"
 fontcolor="green"
 margin="50.0"
 penwidth="6.5"
 outputorder="nodesfirst"
 ];`)
			}
			snippet.WriteString("\n subgraph cluster_" + hash(trustBoundary.Id) + " {\n")
			color, fontColor, bgColor, style, fontname := RgbHexColorTwilight(), RgbHexColorTwilight() /*"#550E0C"*/, "#FAFAFA", "dashed", "Verdana"
			penWidth := 4.5
			if len(trustBoundary.TrustBoundariesNested) > 0 {
				//color, fontColor, style, fontname = Blue, Blue, "dashed", "Verdana"
				penWidth = 5.5
			}
			if len(trustBoundary.ParentTrustBoundaryID(parsedModel)) > 0 {
				// nested boundaries get a slightly darker background
				bgColor = "#F1F1F1"
			}
			if trustBoundary.Type == types.NetworkPolicyNamespaceIsolation {
				fontColor, bgColor = "#222222", "#DFF4FF"
			}
			if trustBoundary.Type == types.ExecutionEnvironment {
				fontColor, bgColor, style = "#555555", "#FFFFF0", "dotted"
			}
			// NOTE(review): the label markup below appears garbled in this source
			// (HTML-like tags seem to have been stripped) — verify against upstream.
			snippet.WriteString(` graph [
 dpi=` + strconv.Itoa(dpi) + `
 label=<` + trustBoundary.Title + ` (` + trustBoundary.Type.String() + `) |
>
 fontsize="21"
 style="` + style + `"
 color="` + color + `"
 bgcolor="` + bgColor + `"
 fontcolor="` + fontColor + `"
 fontname="` + fontname + `"
 penwidth="` + fmt.Sprintf("%f", penWidth) + `"
 forcelabels=true
 outputorder="nodesfirst"
 margin="50.0"
 ];`)
			snippet.WriteString("\n")
			// note: sort.Strings below sorts the model's own slices in place
			keys := trustBoundary.TechnicalAssetsInside
			sort.Strings(keys)
			for _, technicalAssetInside := range keys {
				//log.Println("About to add technical asset link to trust boundary: ", technicalAssetInside)
				technicalAsset := parsedModel.TechnicalAssets[technicalAssetInside]
				snippet.WriteString(hash(technicalAsset.Id))
				snippet.WriteString(";\n")
			}
			keys = trustBoundary.TrustBoundariesNested
			sort.Strings(keys)
			for _, trustBoundaryNested := range keys {
				//log.Println("About to add nested trust boundary to trust boundary: ", trustBoundaryNested)
				trustBoundaryNested := parsedModel.TrustBoundaries[trustBoundaryNested]
				// placeholder; replaced by the nested boundary's snippet below
				snippet.WriteString("LINK-NEEDS-REPLACED-BY-cluster_" + hash(trustBoundaryNested.Id))
				snippet.WriteString(";\n")
			}
			snippet.WriteString(" }\n\n")
			if drawSpaceLinesForLayoutUnfortunatelyFurtherSeparatesAllRanks {
				snippet.WriteString(" }\n\n")
			}
		}
		subgraphSnippetsById[hash(trustBoundary.Id)] = snippet.String()
	}
	// here replace links and remove from map after replacement (i.e. move snippet into nested)
	for i := range subgraphSnippetsById {
		re := regexp.MustCompile(`LINK-NEEDS-REPLACED-BY-cluster_([0-9]*);`)
		for {
			matches := re.FindStringSubmatch(subgraphSnippetsById[i])
			if len(matches) > 0 {
				embeddedSnippet := " //nested:" + subgraphSnippetsById[matches[1]]
				subgraphSnippetsById[i] = strings.ReplaceAll(subgraphSnippetsById[i], matches[0], embeddedSnippet)
				subgraphSnippetsById[matches[1]] = "" // to something like remove it
			} else {
				break
			}
		}
	}
	// now write them all
	keys = make([]string, 0)
	for k := range subgraphSnippetsById {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		snippet := subgraphSnippetsById[key]
		dotContent.WriteString(snippet)
	}

	// Technical Assets ===============================================================================
	// first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order
	// range over them in sorted (hence re-producible) way:
	// Convert map to slice of values:
	var techAssets []types.TechnicalAsset
	for _, techAsset := range parsedModel.TechnicalAssets {
		techAssets = append(techAssets, techAsset)
	}
	sort.Sort(types.ByOrderAndIdSort(techAssets))
	for _, technicalAsset := range techAssets {
		dotContent.WriteString(makeTechAssetNode(parsedModel, technicalAsset, false))
		dotContent.WriteString("\n")
	}

	// Data Flows (Technical Communication Links) ===============================================================================
	for _, technicalAsset := range techAssets {
		for _, dataFlow := range technicalAsset.CommunicationLinks {
			sourceId := technicalAsset.Id
			targetId := dataFlow.TargetId
			//log.Println("About to add link from", sourceId, "to", targetId, "with id", dataFlow.Id)
			var arrowStyle, arrowColor, readOrWriteHead, readOrWriteTail string
			// the arrow head/tail shapes encode read-only vs. read-write flows
			if dataFlow.Readonly {
				readOrWriteHead = "empty"
				readOrWriteTail = "odot"
			} else {
				readOrWriteHead = "normal"
				readOrWriteTail = "dot"
			}
			dir := "forward"
			if dataFlow.IsBidirectional() {
				if !suppressBidirectionalArrows { // as it does not work as bug in graphviz with ortho: https://gitlab.com/graphviz/graphviz/issues/144
					dir = "both"
				}
			}
			arrowStyle = ` style="` + determineArrowLineStyle(dataFlow) + `" penwidth="` + determineArrowPenWidth(dataFlow, parsedModel) + `" arrowtail="` + readOrWriteTail + `" arrowhead="` + readOrWriteHead + `" dir="` + dir + `" arrowsize="2.0" `
			arrowColor = ` color="` + determineArrowColor(dataFlow, parsedModel) + `"`
			tweaks := ""
			if dataFlow.DiagramTweakWeight > 0 {
				tweaks += " weight=\"" + strconv.Itoa(dataFlow.DiagramTweakWeight) + "\" "
			}

			dotContent.WriteString("\n")
			dotContent.WriteString(" " + hash(sourceId) + " -> " + hash(targetId) +
				` [` + arrowColor + ` ` + arrowStyle + tweaks + ` constraint=` + strconv.FormatBool(dataFlow.DiagramTweakConstraint) + ` `)
			if !parsedModel.DiagramTweakSuppressEdgeLabels {
				dotContent.WriteString(` xlabel="` + encode(dataFlow.Protocol.String()) + `" fontcolor="` + determineLabelColor(dataFlow, parsedModel) + `" `)
			}
			dotContent.WriteString(" ];\n")
		}
	}

	diagramInvisibleConnectionsTweaks, err := makeDiagramInvisibleConnectionsTweaks(parsedModel)
	if err != nil {
		return nil, fmt.Errorf("error while making diagram invisible connections tweaks: %s", err)
	}
	dotContent.WriteString(diagramInvisibleConnectionsTweaks)

	diagramSameRankNodeTweaks, err := makeDiagramSameRankNodeTweaks(parsedModel)
	if err != nil {
		return nil, fmt.Errorf("error while making diagram same-rank node tweaks: %s", err)
	}
	dotContent.WriteString(diagramSameRankNodeTweaks)

	dotContent.WriteString("}")

	//fmt.Println(dotContent.String())

	// Write the DOT file
	file, err := os.Create(filepath.Clean(diagramFilenameDOT))
	if err != nil {
		return nil, fmt.Errorf("Error creating %s: %v", diagramFilenameDOT, err)
	}
	defer func() { _ = file.Close() }()
	_, err = fmt.Fprintln(file, dotContent.String())
	if err != nil {
		return nil, fmt.Errorf("Error writing %s: %v", diagramFilenameDOT, err)
	}
	return file, nil
}
+
+func GenerateDataFlowDiagramGraphvizImage(dotFile *os.File, targetDir string,
+ tempFolder, binFolder, dataFlowDiagramFilenamePNG string, progressReporter progressReporter) error {
+ progressReporter.Info("Rendering data flow diagram input")
+ // tmp files
+ tmpFileDOT, err := os.CreateTemp(tempFolder, "diagram-*-.gv")
+ if err != nil {
+ return fmt.Errorf("Error creating temp file: %v", err)
+ }
+ defer func() { _ = os.Remove(tmpFileDOT.Name()) }()
+
+ tmpFilePNG, err := os.CreateTemp(tempFolder, "diagram-*-.png")
+ if err != nil {
+ return fmt.Errorf("Error creating temp file: %v", err)
+ }
+ defer func() { _ = os.Remove(tmpFilePNG.Name()) }()
+
+ // copy into tmp file as input
+ inputDOT, err := os.ReadFile(dotFile.Name())
+ if err != nil {
+ return fmt.Errorf("Error reading %s: %v", dotFile.Name(), err)
+ }
+ err = os.WriteFile(tmpFileDOT.Name(), inputDOT, 0600)
+ if err != nil {
+ return fmt.Errorf("Error creating %s: %v", tmpFileDOT.Name(), err)
+ }
+
+ // exec
+
+ cmd := exec.Command("dot", "-Tpng", tmpFileDOT.Name(), "-o", tmpFilePNG.Name()) // #nosec G204
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return errors.New("graph rendering call failed with error: " + err.Error())
+ }
+ // copy into resulting file
+ inputPNG, err := os.ReadFile(tmpFilePNG.Name())
+ if err != nil {
+ return fmt.Errorf("Error copying into resulting file %s: %v", tmpFilePNG.Name(), err)
+ }
+ err = os.WriteFile(filepath.Join(targetDir, dataFlowDiagramFilenamePNG), inputPNG, 0600)
+ if err != nil {
+ return fmt.Errorf("Error creating %s: %v", filepath.Join(targetDir, dataFlowDiagramFilenamePNG), err)
+ }
+ return nil
+}
+
+func makeDiagramSameRankNodeTweaks(parsedModel *types.ParsedModel) (string, error) {
+ // see https://stackoverflow.com/questions/25734244/how-do-i-place-nodes-on-the-same-level-in-dot
+ tweak := ""
+ if len(parsedModel.DiagramTweakSameRankAssets) > 0 {
+ for _, sameRank := range parsedModel.DiagramTweakSameRankAssets {
+ assetIDs := strings.Split(sameRank, ":")
+ if len(assetIDs) > 0 {
+ tweak += "{ rank=same; "
+ for _, id := range assetIDs {
+ err := parsedModel.CheckTechnicalAssetExists(id, "diagram tweak same-rank", true)
+ if err != nil {
+ return "", fmt.Errorf("error while checking technical asset existence: %s", err)
+ }
+ if len(parsedModel.TechnicalAssets[id].GetTrustBoundaryId(parsedModel)) > 0 {
+ return "", fmt.Errorf("technical assets (referenced in same rank diagram tweak) are inside trust boundaries: " +
+ fmt.Sprintf("%v", parsedModel.DiagramTweakSameRankAssets))
+ }
+ tweak += " " + hash(id) + "; "
+ }
+ tweak += " }"
+ }
+ }
+ }
+ return tweak, nil
+}
+
+func makeDiagramInvisibleConnectionsTweaks(parsedModel *types.ParsedModel) (string, error) {
+ // see https://stackoverflow.com/questions/2476575/how-to-control-node-placement-in-graphviz-i-e-avoid-edge-crossings
+ tweak := ""
+ if len(parsedModel.DiagramTweakInvisibleConnectionsBetweenAssets) > 0 {
+ for _, invisibleConnections := range parsedModel.DiagramTweakInvisibleConnectionsBetweenAssets {
+ assetIDs := strings.Split(invisibleConnections, ":")
+ if len(assetIDs) == 2 {
+ err := parsedModel.CheckTechnicalAssetExists(assetIDs[0], "diagram tweak connections", true)
+ if err != nil {
+ return "", fmt.Errorf("error while checking technical asset existence: %s", err)
+ }
+ err = parsedModel.CheckTechnicalAssetExists(assetIDs[1], "diagram tweak connections", true)
+ if err != nil {
+ return "", fmt.Errorf("error while checking technical asset existence: %s", err)
+ }
+
+ tweak += "\n" + hash(assetIDs[0]) + " -> " + hash(assetIDs[1]) + " [style=invis]; \n"
+ }
+ }
+ }
+ return tweak, nil
+}
+
// WriteDataAssetDiagramGraphvizDOT writes the data-asset diagram (data assets
// linked to the technical assets that store or process them) as a Graphviz
// DOT file to diagramFilenameDOT and returns the written file handle for the
// subsequent PNG rendering step.
func WriteDataAssetDiagramGraphvizDOT(parsedModel *types.ParsedModel, diagramFilenameDOT string, dpi int,
	progressReporter progressReporter) (*os.File, error) {
	progressReporter.Info("Writing data asset diagram input")

	var dotContent strings.Builder
	dotContent.WriteString("digraph generatedModel { concentrate=true \n")

	// Metadata init ===============================================================================
	dotContent.WriteString(` graph [
 dpi=` + strconv.Itoa(dpi) + `
 fontname="Verdana"
 labelloc="c"
 fontsize="20"
 splines=false
 rankdir="LR"
 nodesep=1.0
 ranksep=3.0
 outputorder="nodesfirst"
 ];
 node [
 fontcolor="white"
 fontname="Verdana"
 fontsize="20"
 ];
 edge [
 shape="none"
 fontname="Verdana"
 fontsize="18"
 ];
`)

	// Technical Assets ===============================================================================
	// only assets that store or process data appear on this diagram
	techAssets := make([]types.TechnicalAsset, 0)
	for _, techAsset := range parsedModel.TechnicalAssets {
		techAssets = append(techAssets, techAsset)
	}
	sort.Sort(types.ByOrderAndIdSort(techAssets))
	for _, technicalAsset := range techAssets {
		if len(technicalAsset.DataAssetsStored) > 0 || len(technicalAsset.DataAssetsProcessed) > 0 {
			dotContent.WriteString(makeTechAssetNode(parsedModel, technicalAsset, true))
			dotContent.WriteString("\n")
		}
	}

	// Data Assets ===============================================================================
	dataAssets := make([]types.DataAsset, 0)
	for _, dataAsset := range parsedModel.DataAssets {
		dataAssets = append(dataAssets, dataAsset)
	}

	types.SortByDataAssetDataBreachProbabilityAndTitle(parsedModel, dataAssets)
	for _, dataAsset := range dataAssets {
		dotContent.WriteString(makeDataAssetNode(parsedModel, dataAsset))
		dotContent.WriteString("\n")
	}

	// Data Asset to Tech Asset links ===============================================================================
	// solid blue edge = stored; dashed gray edge = processed (only)
	for _, technicalAsset := range techAssets {
		for _, sourceId := range technicalAsset.DataAssetsStored {
			targetId := technicalAsset.Id
			dotContent.WriteString("\n")
			dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) +
				` [ color="blue" style="solid" ];`)
			dotContent.WriteString("\n")
		}
		for _, sourceId := range technicalAsset.DataAssetsProcessed {
			if !contains(technicalAsset.DataAssetsStored, sourceId) { // here only if not already drawn above
				targetId := technicalAsset.Id
				dotContent.WriteString("\n")
				dotContent.WriteString(hash(sourceId) + " -> " + hash(targetId) +
					` [ color="#666666" style="dashed" ];`)
				dotContent.WriteString("\n")
			}
		}
	}

	dotContent.WriteString("}")

	// Write the DOT file
	file, err := os.Create(filepath.Clean(diagramFilenameDOT))
	if err != nil {
		return nil, fmt.Errorf("Error creating %s: %v", diagramFilenameDOT, err)
	}
	defer func() { _ = file.Close() }()
	_, err = fmt.Fprintln(file, dotContent.String())
	if err != nil {
		return nil, fmt.Errorf("Error writing %s: %v", diagramFilenameDOT, err)
	}
	return file, nil
}
+
+func makeDataAssetNode(parsedModel *types.ParsedModel, dataAsset types.DataAsset) string {
+ var color string
+ switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) {
+ case types.Probable:
+ color = RgbHexColorHighRisk()
+ case types.Possible:
+ color = RgbHexColorMediumRisk()
+ case types.Improbable:
+ color = RgbHexColorLowRisk()
+ default:
+ color = "#444444" // since black is too dark here as fill color
+ }
+ if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) {
+ color = "#444444" // since black is too dark here as fill color
+ }
+ return " " + hash(dataAsset.Id) + ` [ label=<` + encode(dataAsset.Title) + `> penwidth="3.0" style="filled" fillcolor="` + color + `" color="` + color + "\"\n ]; "
+}
+
// makeTechAssetNode renders the DOT node for one technical asset. In
// simplified mode (used by the data-asset diagram) it emits a plain box
// colored by the highest still-at-risk severity; otherwise (data-flow
// diagram) the node shape encodes the asset type and the label carries
// technology, size and RAA information.
func makeTechAssetNode(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, simplified bool) string {
	if simplified {
		color := RgbHexColorOutOfScope()
		if !technicalAsset.OutOfScope {
			generatedRisks := technicalAsset.GeneratedRisks(parsedModel)
			switch types.HighestSeverityStillAtRisk(parsedModel, generatedRisks) {
			case types.CriticalSeverity:
				color = RgbHexColorCriticalRisk()
			case types.HighSeverity:
				color = RgbHexColorHighRisk()
			case types.ElevatedSeverity:
				color = RgbHexColorElevatedRisk()
			case types.MediumSeverity:
				color = RgbHexColorMediumRisk()
			case types.LowSeverity:
				color = RgbHexColorLowRisk()
			default:
				color = "#444444" // since black is too dark here as fill color
			}
			if len(types.ReduceToOnlyStillAtRisk(parsedModel, generatedRisks)) == 0 {
				color = "#444444" // since black is too dark here as fill color
			}
		}
		return " " + hash(technicalAsset.Id) + ` [ shape="box" style="filled" fillcolor="` + color + `"
 label=<` + encode(technicalAsset.Title) + `> penwidth="3.0" color="` + color + `" ];
 `
	} else {
		var shape, title string
		var lineBreak = ""
		switch technicalAsset.Type {
		case types.ExternalEntity:
			shape = "box"
			title = technicalAsset.Title
		case types.Process:
			shape = "ellipse"
			title = technicalAsset.Title
		case types.Datastore:
			shape = "cylinder"
			title = technicalAsset.Title
			if technicalAsset.Redundant {
				// NOTE(review): this literal looks garbled in the source (likely a
				// stripped "<br/>" HTML line break) — verify against upstream.
				lineBreak = "
"
			}
		}

		// a human-operated client overrides the type-based shape
		if technicalAsset.UsedAsClientByHuman {
			shape = "octagon"
		}

		// RAA = Relative Attacker Attractiveness
		raa := technicalAsset.RAA
		var attackerAttractivenessLabel string
		if technicalAsset.OutOfScope {
			attackerAttractivenessLabel = "RAA: out of scope"
		} else {
			attackerAttractivenessLabel = "RAA: " + fmt.Sprintf("%.0f", raa) + " %"
		}

		compartmentBorder := "0"
		if technicalAsset.MultiTenant {
			compartmentBorder = "1"
		}

		// NOTE(review): the label markup below appears garbled in this source
		// (HTML-like tags seem to have been stripped) and compartmentBorder is
		// consequently unused here — verify against upstream.
		return " " + hash(technicalAsset.Id) + ` [
 label=<` + lineBreak + technicalAsset.Technology.String() + ` ` + technicalAsset.Size.String() + ` |
` + encode(title) + `
 |
` + attackerAttractivenessLabel + ` |
>
 shape=` + shape + ` style="` + determineShapeBorderLineStyle(technicalAsset) + `,` + determineShapeStyle(technicalAsset) + `" penwidth="` + determineShapeBorderPenWidth(technicalAsset, parsedModel) + `" fillcolor="` + determineShapeFillColor(technicalAsset, parsedModel) + `"
 peripheries=` + strconv.Itoa(determineShapePeripheries(technicalAsset)) + `
 color="` + determineShapeBorderColor(technicalAsset, parsedModel) + "\"\n ]; "
	}
}
+
+func GenerateDataAssetDiagramGraphvizImage(dotFile *os.File, targetDir string,
+ tempFolder, binFolder, dataAssetDiagramFilenamePNG string, progressReporter progressReporter) error { // TODO dedupe with other render...() method here
+ progressReporter.Info("Rendering data asset diagram input")
+ // tmp files
+ tmpFileDOT, err := os.CreateTemp(tempFolder, "diagram-*-.gv")
+ if err != nil {
+ return fmt.Errorf("Error creating temp file: %v", err)
+ }
+ defer func() { _ = os.Remove(tmpFileDOT.Name()) }()
+
+ tmpFilePNG, err := os.CreateTemp(tempFolder, "diagram-*-.png")
+ if err != nil {
+ return fmt.Errorf("Error creating temp file: %v", err)
+ }
+ defer func() { _ = os.Remove(tmpFilePNG.Name()) }()
+
+ // copy into tmp file as input
+ inputDOT, err := os.ReadFile(dotFile.Name())
+ if err != nil {
+ return fmt.Errorf("Error reading %s: %v", dotFile.Name(), err)
+ }
+ err = os.WriteFile(tmpFileDOT.Name(), inputDOT, 0600)
+ if err != nil {
+ return fmt.Errorf("Error creating %s: %v", tmpFileDOT.Name(), err)
+ }
+
+ // exec
+ cmd := exec.Command("dot", "-Tpng", tmpFileDOT.Name(), "-o", tmpFilePNG.Name()) // #nosec G204
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return errors.New("graph rendering call failed with error: " + err.Error())
+ }
+ // copy into resulting file
+ inputPNG, err := os.ReadFile(tmpFilePNG.Name())
+ if err != nil {
+ return fmt.Errorf("Error copying into resulting file %s: %v", tmpFilePNG.Name(), err)
+ }
+ err = os.WriteFile(filepath.Join(targetDir, dataAssetDiagramFilenamePNG), inputPNG, 0600)
+ if err != nil {
+ return fmt.Errorf("Error creating %s: %v", filepath.Join(targetDir, dataAssetDiagramFilenamePNG), err)
+ }
+ return nil
+}
+
// hash maps an arbitrary model ID to a stable decimal string (FNV-1a, 32-bit)
// that is safe to use as a Graphviz node identifier.
func hash(s string) string {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(s))
	return strconv.FormatUint(uint64(hasher.Sum32()), 10)
}
+
// encode escapes ampersands for use inside Graphviz HTML-like labels.
// Fix: the replacement target had been corrupted to a no-op ("&" -> "&");
// restored the intended HTML entity escaping ("&" -> "&amp;").
func encode(value string) string {
	return strings.ReplaceAll(value, "&", "&amp;")
}
diff --git a/pkg/report/json.go b/pkg/report/json.go
new file mode 100644
index 00000000..8a27d655
--- /dev/null
+++ b/pkg/report/json.go
@@ -0,0 +1,56 @@
+package report
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+func WriteRisksJSON(parsedModel *types.ParsedModel, filename string) error {
+ /*
+ remainingRisks := make([]model.Risk, 0)
+ for _, category := range model.SortedRiskCategories() {
+ risks := model.SortedRisksOfCategory(category)
+ for _, risk := range model.ReduceToOnlyStillAtRisk(risks) {
+ remainingRisks = append(remainingRisks, risk)
+ }
+ }
+ */
+ jsonBytes, err := json.Marshal(types.AllRisks(parsedModel))
+ if err != nil {
+ return fmt.Errorf("failed to marshal risks to JSON: %w", err)
+ }
+ err = os.WriteFile(filename, jsonBytes, 0600)
+ if err != nil {
+ return fmt.Errorf("failed to write risks to JSON file: %w", err)
+ }
+ return nil
+}
+
+// TODO: also a "data assets" json?
+
+func WriteTechnicalAssetsJSON(parsedModel *types.ParsedModel, filename string) error {
+ jsonBytes, err := json.Marshal(parsedModel.TechnicalAssets)
+ if err != nil {
+ return fmt.Errorf("failed to marshal technical assets to JSON: %w", err)
+ }
+ err = os.WriteFile(filename, jsonBytes, 0600)
+ if err != nil {
+ return fmt.Errorf("failed to write technical assets to JSON file: %w", err)
+ }
+ return nil
+}
+
+func WriteStatsJSON(parsedModel *types.ParsedModel, filename string) error {
+ jsonBytes, err := json.Marshal(types.OverallRiskStatistics(parsedModel))
+ if err != nil {
+ return fmt.Errorf("failed to marshal stats to JSON: %w", err)
+ }
+ err = os.WriteFile(filename, jsonBytes, 0600)
+ if err != nil {
+ return fmt.Errorf("failed to write stats to JSON file: %w", err)
+ }
+ return nil
+}
diff --git a/pkg/report/report.go b/pkg/report/report.go
new file mode 100644
index 00000000..907056f2
--- /dev/null
+++ b/pkg/report/report.go
@@ -0,0 +1,4601 @@
+package report
+
+import (
+ "fmt"
+ "image"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/jung-kurt/gofpdf"
+ "github.com/jung-kurt/gofpdf/contrib/gofpdi"
+ "github.com/threagile/threagile/pkg/docs"
+ "github.com/threagile/threagile/pkg/model"
+ "github.com/threagile/threagile/pkg/security/risks"
+ "github.com/threagile/threagile/pkg/security/types"
+ "github.com/wcharczuk/go-chart"
+ "github.com/wcharczuk/go-chart/drawing"
+)
+
// Font sizes (in points) used throughout the generated PDF report.
const fontSizeHeadline, fontSizeHeadlineSmall, fontSizeBody, fontSizeSmall, fontSizeVerySmall = 20, 16, 12, 9, 7

// Layout feature toggles: landscape pages are allowed; the diagram legend page
// is not embedded. (The commented-out names preserve a formerly used flag.)
const /*dataFlowDiagramFullscreen,*/ allowedPdfLandscapePages, embedDiagramLegendPage = /*false,*/ true, false
+
// pdfReporter holds all mutable state needed while rendering a single PDF
// report: the gofpdf handle, imported background templates, page/link
// counters, and the table-of-contents link registry.
type pdfReporter struct {
	isLandscapePage               bool               // true while a landscape page is active; suppresses the portrait header
	pdf                           *gofpdf.Fpdf       // underlying PDF document being written
	coverTemplateId               int                // imported background template for the cover page
	contentTemplateId             int                // imported background template for content pages
	diagramLegendTemplateId       int                // imported background template for the diagram legend page
	pageNo                        int                // running page counter, incremented by the footer func
	linkCounter                   int                // next internal link id (gofpdf link counting starts at 1)
	tocLinkIdByAssetId            map[string]int     // TOC link ids keyed by asset/category/boundary/runtime id
	homeLink                      int                // link id pointing back to the table of contents
	currentChapterTitleBreadcrumb string             // chapter title rendered in the page breadcrumb
}
+
+func (r *pdfReporter) initReport() {
+ r.pdf = nil
+ r.isLandscapePage = false
+ r.pageNo = 0
+ r.linkCounter = 0
+ r.homeLink = 0
+ r.currentChapterTitleBreadcrumb = ""
+ r.tocLinkIdByAssetId = make(map[string]int)
+}
+
+func (r *pdfReporter) WriteReportPDF(reportFilename string,
+ templateFilename string,
+ dataFlowDiagramFilenamePNG string,
+ dataAssetDiagramFilenamePNG string,
+ modelFilename string,
+ skipRiskRules string,
+ buildTimestamp string,
+ modelHash string,
+ introTextRAA string,
+ customRiskRules map[string]*model.CustomRisk,
+ tempFolder string,
+ model *types.ParsedModel) error {
+ defer func() {
+ value := recover()
+ if value != nil {
+ fmt.Printf("error creating PDF report: %v", value)
+ }
+ }()
+
+ r.initReport()
+ r.createPdfAndInitMetadata(model)
+ r.parseBackgroundTemplate(templateFilename)
+ r.createCover(model)
+ r.createTableOfContents(model)
+ err := r.createManagementSummary(model, tempFolder)
+ if err != nil {
+ return fmt.Errorf("error creating management summary: %w", err)
+ }
+ r.createImpactInitialRisks(model)
+ err = r.createRiskMitigationStatus(model, tempFolder)
+ if err != nil {
+ return fmt.Errorf("error creating risk mitigation status: %w", err)
+ }
+ r.createImpactRemainingRisks(model)
+ err = r.createTargetDescription(model, filepath.Dir(modelFilename))
+ if err != nil {
+ return fmt.Errorf("error creating target description: %w", err)
+ }
+ r.embedDataFlowDiagram(dataFlowDiagramFilenamePNG, tempFolder)
+ r.createSecurityRequirements(model)
+ r.createAbuseCases(model)
+ r.createTagListing(model)
+ r.createSTRIDE(model)
+ r.createAssignmentByFunction(model)
+ r.createRAA(model, introTextRAA)
+ r.embedDataRiskMapping(dataAssetDiagramFilenamePNG, tempFolder)
+ //createDataRiskQuickWins()
+ r.createOutOfScopeAssets(model)
+ r.createModelFailures(model)
+ r.createQuestions(model)
+ r.createRiskCategories(model)
+ r.createTechnicalAssets(model)
+ r.createDataAssets(model)
+ r.createTrustBoundaries(model)
+ r.createSharedRuntimes(model)
+ r.createRiskRulesChecked(model, modelFilename, skipRiskRules, buildTimestamp, modelHash, customRiskRules)
+ r.createDisclaimer(model)
+ err = r.writeReportToFile(reportFilename)
+ if err != nil {
+ return fmt.Errorf("error writing report to file: %w", err)
+ }
+ return nil
+}
+
// createPdfAndInitMetadata creates the A4 portrait PDF document, sets its
// metadata from the model's author/title, and installs the header and footer
// callbacks used on every subsequent page.
func (r *pdfReporter) createPdfAndInitMetadata(model *types.ParsedModel) {
	r.pdf = gofpdf.New("P", "mm", "A4", "")
	r.pdf.SetCreator(model.Author.Homepage, true)
	r.pdf.SetAuthor(model.Author.Name, true)
	r.pdf.SetTitle("Threat Model Report: "+model.Title, true)
	r.pdf.SetSubject("Threat Model Report: "+model.Title, true)
	// r.pdf.SetPageBox("crop", 0, 0, 100, 010)
	r.pdf.SetHeaderFunc(func() {
		// Landscape pages (e.g. wide diagrams) get no portrait header template.
		if r.isLandscapePage {
			return
		}

		gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300)
		r.pdf.SetTopMargin(35)
	})
	r.pdf.SetFooterFunc(func() {
		r.addBreadcrumb(model)
		r.pdf.SetFont("Helvetica", "", 10)
		r.pdf.SetTextColor(127, 127, 127)
		r.pdf.Text(8.6, 284, "Threat Model Report via Threagile") //: "+parsedModel.Title)
		r.pdf.Link(8.4, 281, 54.6, 4, r.homeLink)
		r.pageNo++
		// Right-align page numbers by left-padding up to three digits.
		text := "Page " + strconv.Itoa(r.pageNo)
		if r.pageNo < 10 {
			text = " " + text
		} else if r.pageNo < 100 {
			text = " " + text
		}
		// The first (cover) page carries no page number.
		if r.pageNo > 1 {
			r.pdf.Text(186, 284, text)
		}
	})
	r.linkCounter = 1 // link counting starts at 1 via r.pdf.AddLink
}
+
+func (r *pdfReporter) addBreadcrumb(parsedModel *types.ParsedModel) {
+ if len(r.currentChapterTitleBreadcrumb) > 0 {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetFont("Helvetica", "", 10)
+ r.pdf.SetTextColor(127, 127, 127)
+ r.pdf.Text(46.7, 24.5, uni(r.currentChapterTitleBreadcrumb+" - "+parsedModel.Title))
+ }
+}
+
// parseBackgroundTemplate imports the three background pages (cover, content,
// diagram legend) from the given template PDF so they can be stamped onto
// report pages later.
func (r *pdfReporter) parseBackgroundTemplate(templateFilename string) {
	/*
		imageBox, err := rice.FindBox("template")
		checkErr(err)
		file, err := os.CreateTemp("", "background-*-.r.pdf")
		checkErr(err)
		defer os.Remove(file.Name())
		backgroundBytes := imageBox.MustBytes("background.r.pdf")
		err = os.WriteFile(file.Name(), backgroundBytes, 0644)
		checkErr(err)
	*/
	// Template page 1 = cover, page 2 = content, page 3 = diagram legend.
	r.coverTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 1, "/MediaBox")
	r.contentTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 2, "/MediaBox")
	r.diagramLegendTemplateId = gofpdi.ImportPage(r.pdf, templateFilename, 3, "/MediaBox")
}
+
// createCover renders the report cover page: background template, report
// title, model title, report date (defaulting to "now" when the model has
// none), author name and homepage.
func (r *pdfReporter) createCover(parsedModel *types.ParsedModel) {
	uni := r.pdf.UnicodeTranslatorFromDescriptor("")
	r.pdf.AddPage()
	gofpdi.UseImportedTemplate(r.pdf, r.coverTemplateId, 0, 0, 0, 300)
	r.pdf.SetFont("Helvetica", "B", 28)
	r.pdf.SetTextColor(0, 0, 0)
	r.pdf.Text(40, 110, "Threat Model Report")
	r.pdf.Text(40, 125, uni(parsedModel.Title))
	r.pdf.SetFont("Helvetica", "", 12)
	reportDate := parsedModel.Date
	if reportDate.IsZero() {
		// No explicit model date: stamp the cover with the generation time.
		reportDate = types.Date{Time: time.Now()}
	}
	r.pdf.Text(40.7, 145, reportDate.Format("2 January 2006"))
	r.pdf.Text(40.7, 153, uni(parsedModel.Author.Name))
	r.pdf.SetFont("Helvetica", "", 10)
	r.pdf.SetTextColor(80, 80, 80)
	r.pdf.Text(8.6, 275, parsedModel.Author.Homepage)
	r.pdf.SetFont("Helvetica", "", 12)
	r.pdf.SetTextColor(0, 0, 0)
}
+
// createTableOfContents renders the full table of contents: the fixed
// "Results Overview" entries, then dynamic sections for risk categories,
// technical assets, data assets, trust boundaries and shared runtimes, and
// finally the "About Threagile" entries. Page numbers are written later via
// r.pdf.RegisterAlias placeholders ("{...}"), and clickable links are
// registered per entry (asset-specific link ids go into tocLinkIdByAssetId).
func (r *pdfReporter) createTableOfContents(parsedModel *types.ParsedModel) {
	uni := r.pdf.UnicodeTranslatorFromDescriptor("")
	r.pdf.AddPage()
	r.currentChapterTitleBreadcrumb = "Table of Contents"
	r.homeLink = r.pdf.AddLink()
	r.defineLinkTarget("{home}")
	gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300)
	r.pdf.SetFont("Helvetica", "B", fontSizeHeadline)
	r.pdf.Text(11, 40, "Table of Contents")
	r.pdf.SetFont("Helvetica", "", fontSizeBody)
	r.pdf.SetY(46)

	// Dotted gray leader lines between entry text and page number.
	r.pdf.SetLineWidth(0.25)
	r.pdf.SetDrawColor(160, 160, 160)
	r.pdf.SetDashPattern([]float64{0.5, 0.5}, 0)

	// ===============

	// y is the current vertical cursor in mm; each TOC entry advances it by 6.
	var y float64 = 50
	r.pdf.SetFont("Helvetica", "B", fontSizeBody)
	r.pdf.Text(11, y, "Results Overview")
	r.pdf.SetFont("Helvetica", "", fontSizeBody)

	y += 6
	r.pdf.Text(11, y, " "+"Management Summary")
	r.pdf.Text(175, y, "{management-summary}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	// Singular/plural wording for the initial-risks entry.
	risksStr := "Risks"
	catStr := "Categories"
	count, catCount := types.TotalRiskCount(parsedModel), len(parsedModel.GeneratedRisksByCategory)
	if count == 1 {
		risksStr = "Risk"
	}
	if catCount == 1 {
		catStr = "Category"
	}
	y += 6
	r.pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Initial "+risksStr+" in "+strconv.Itoa(catCount)+" "+catStr)
	r.pdf.Text(175, y, "{impact-analysis-initial-risks}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Risk Mitigation")
	r.pdf.Text(175, y, "{risk-mitigation-status}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	// Recompute counts for the remaining (still-at-risk) entry.
	risksStr = "Risks"
	catStr = "Categories"
	count, catCount = len(types.FilteredByStillAtRisk(parsedModel)), len(types.CategoriesOfOnlyRisksStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory))
	if count == 1 {
		risksStr = "Risk"
	}
	if catCount == 1 {
		catStr = "Category"
	}
	r.pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Remaining "+risksStr+" in "+strconv.Itoa(catCount)+" "+catStr)
	r.pdf.Text(175, y, "{impact-analysis-remaining-risks}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Application Overview")
	r.pdf.Text(175, y, "{target-overview}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Data-Flow Diagram")
	r.pdf.Text(175, y, "{data-flow-diagram}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Security Requirements")
	r.pdf.Text(175, y, "{security-requirements}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Abuse Cases")
	r.pdf.Text(175, y, "{abuse-cases}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Tag Listing")
	r.pdf.Text(175, y, "{tag-listing}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"STRIDE Classification of Identified Risks")
	r.pdf.Text(175, y, "{stride}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Assignment by Function")
	r.pdf.Text(175, y, "{function-assignment}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"RAA Analysis")
	r.pdf.Text(175, y, "{raa-analysis}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	r.pdf.Text(11, y, " "+"Data Mapping")
	r.pdf.Text(175, y, "{data-risk-mapping}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	/*
		y += 6
		assets := "assets"
		count = len(model.SortedTechnicalAssetsByQuickWinsAndTitle())
		if count == 1 {
			assets = "asset"
		}
		r.pdf.Text(11, y, " "+"Data Risk Quick Wins: "+strconv.Itoa(count)+" "+assets)
		r.pdf.Text(175, y, "{data-risk-quick-wins}")
		r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
		r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
	*/

	y += 6
	assets := "Assets"
	count = len(parsedModel.OutOfScopeTechnicalAssets())
	if count == 1 {
		assets = "Asset"
	}
	r.pdf.Text(11, y, " "+"Out-of-Scope Assets: "+strconv.Itoa(count)+" "+assets)
	r.pdf.Text(175, y, "{out-of-scope-assets}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	// Model-failure entry is highlighted when any such risk is still open.
	modelFailures := types.FlattenRiskSlice(types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory))
	risksStr = "Risks"
	count = len(modelFailures)
	if count == 1 {
		risksStr = "Risk"
	}
	countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, modelFailures))
	if countStillAtRisk > 0 {
		ColorModelFailure(r.pdf)
	}
	r.pdf.Text(11, y, " "+"Potential Model Failures: "+strconv.Itoa(countStillAtRisk)+" / "+strconv.Itoa(count)+" "+risksStr)
	r.pdf.Text(175, y, "{model-failures}")
	r.pdfColorBlack()
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	y += 6
	questions := "Questions"
	count = len(parsedModel.Questions)
	if count == 1 {
		questions = "Question"
	}
	// Highlight the questions entry while any question is unanswered.
	if questionsUnanswered(parsedModel) > 0 {
		ColorModelFailure(r.pdf)
	}
	r.pdf.Text(11, y, " "+"Questions: "+strconv.Itoa(questionsUnanswered(parsedModel))+" / "+strconv.Itoa(count)+" "+questions)
	r.pdf.Text(175, y, "{questions}")
	r.pdfColorBlack()
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())

	// ===============

	// Section: risks grouped by vulnerability category, colored by the
	// highest severity still at risk within each category.
	if len(parsedModel.GeneratedRisksByCategory) > 0 {
		y += 6
		y += 6
		if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
			r.pageBreakInLists()
			y = 40
		}
		r.pdf.SetFont("Helvetica", "B", fontSizeBody)
		r.pdf.SetTextColor(0, 0, 0)
		r.pdf.Text(11, y, "Risks by Vulnerability Category")
		r.pdf.SetFont("Helvetica", "", fontSizeBody)
		y += 6
		r.pdf.Text(11, y, " "+"Identified Risks by Vulnerability Category")
		r.pdf.Text(175, y, "{intro-risks-by-vulnerability-category}")
		r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
		r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
		for _, category := range types.SortedRiskCategories(parsedModel) {
			newRisksStr := types.SortedRisksOfCategory(parsedModel, category)
			switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) {
			case types.CriticalSeverity:
				ColorCriticalRisk(r.pdf)
			case types.HighSeverity:
				ColorHighRisk(r.pdf)
			case types.ElevatedSeverity:
				ColorElevatedRisk(r.pdf)
			case types.MediumSeverity:
				ColorMediumRisk(r.pdf)
			case types.LowSeverity:
				ColorLowRisk(r.pdf)
			default:
				r.pdfColorBlack()
			}
			// Fully mitigated categories are rendered in plain black.
			if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 {
				r.pdfColorBlack()
			}
			y += 6
			if y > 275 {
				r.pageBreakInLists()
				y = 40
			}
			countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr))
			suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk"
			if len(newRisksStr) != 1 {
				suffix += "s"
			}
			r.pdf.Text(11, y, " "+uni(category.Title)+": "+suffix)
			r.pdf.Text(175, y, "{"+category.Id+"}")
			r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
			r.tocLinkIdByAssetId[category.Id] = r.pdf.AddLink()
			r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[category.Id])
		}
	}

	// ===============

	// Section: risks grouped by technical asset; out-of-scope assets get a
	// dedicated color and suffix instead of a severity color.
	if len(parsedModel.TechnicalAssets) > 0 {
		y += 6
		y += 6
		if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
			r.pageBreakInLists()
			y = 40
		}
		r.pdf.SetFont("Helvetica", "B", fontSizeBody)
		r.pdf.SetTextColor(0, 0, 0)
		r.pdf.Text(11, y, "Risks by Technical Asset")
		r.pdf.SetFont("Helvetica", "", fontSizeBody)
		y += 6
		r.pdf.Text(11, y, " "+"Identified Risks by Technical Asset")
		r.pdf.Text(175, y, "{intro-risks-by-technical-asset}")
		r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
		r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
		for _, technicalAsset := range sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel) {
			newRisksStr := technicalAsset.GeneratedRisks(parsedModel)
			y += 6
			if y > 275 {
				r.pageBreakInLists()
				y = 40
			}
			countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr))
			suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk"
			if len(newRisksStr) != 1 {
				suffix += "s"
			}
			if technicalAsset.OutOfScope {
				r.pdfColorOutOfScope()
				suffix = "out-of-scope"
			} else {
				switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) {
				case types.CriticalSeverity:
					ColorCriticalRisk(r.pdf)
				case types.HighSeverity:
					ColorHighRisk(r.pdf)
				case types.ElevatedSeverity:
					ColorElevatedRisk(r.pdf)
				case types.MediumSeverity:
					ColorMediumRisk(r.pdf)
				case types.LowSeverity:
					ColorLowRisk(r.pdf)
				default:
					r.pdfColorBlack()
				}
				if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 {
					r.pdfColorBlack()
				}
			}
			r.pdf.Text(11, y, " "+uni(technicalAsset.Title)+": "+suffix)
			r.pdf.Text(175, y, "{"+technicalAsset.Id+"}")
			r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
			r.tocLinkIdByAssetId[technicalAsset.Id] = r.pdf.AddLink()
			r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[technicalAsset.Id])
		}
	}

	// ===============

	// Section: data breach probabilities per data asset, colored by the
	// still-at-risk probability level.
	if len(parsedModel.DataAssets) > 0 {
		y += 6
		y += 6
		if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
			r.pageBreakInLists()
			y = 40
		}
		r.pdf.SetFont("Helvetica", "B", fontSizeBody)
		r.pdfColorBlack()
		r.pdf.Text(11, y, "Data Breach Probabilities by Data Asset")
		r.pdf.SetFont("Helvetica", "", fontSizeBody)
		y += 6
		r.pdf.Text(11, y, " "+"Identified Data Breach Probabilities by Data Asset")
		r.pdf.Text(175, y, "{intro-risks-by-data-asset}")
		r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
		r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
		for _, dataAsset := range sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel) {
			y += 6
			if y > 275 {
				r.pageBreakInLists()
				y = 40
			}
			newRisksStr := dataAsset.IdentifiedDataBreachProbabilityRisks(parsedModel)
			countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr))
			suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(newRisksStr)) + " Risk"
			if len(newRisksStr) != 1 {
				suffix += "s"
			}
			switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) {
			case types.Probable:
				ColorHighRisk(r.pdf)
			case types.Possible:
				ColorMediumRisk(r.pdf)
			case types.Improbable:
				ColorLowRisk(r.pdf)
			default:
				r.pdfColorBlack()
			}
			if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) {
				r.pdfColorBlack()
			}
			r.pdf.Text(11, y, " "+uni(dataAsset.Title)+": "+suffix)
			r.pdf.Text(175, y, "{data:"+dataAsset.Id+"}")
			r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
			r.tocLinkIdByAssetId[dataAsset.Id] = r.pdf.AddLink()
			r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[dataAsset.Id])
		}
	}

	// ===============

	// Section: trust boundaries; non-network boundaries are grayed out.
	if len(parsedModel.TrustBoundaries) > 0 {
		y += 6
		y += 6
		if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
			r.pageBreakInLists()
			y = 40
		}
		r.pdf.SetFont("Helvetica", "B", fontSizeBody)
		r.pdfColorBlack()
		r.pdf.Text(11, y, "Trust Boundaries")
		r.pdf.SetFont("Helvetica", "", fontSizeBody)
		for _, key := range types.SortedKeysOfTrustBoundaries(parsedModel) {
			trustBoundary := parsedModel.TrustBoundaries[key]
			y += 6
			if y > 275 {
				r.pageBreakInLists()
				y = 40
			}
			ColorTwilight(r.pdf)
			if !trustBoundary.Type.IsNetworkBoundary() {
				r.pdfColorLightGray()
			}
			r.pdf.Text(11, y, " "+uni(trustBoundary.Title))
			r.pdf.Text(175, y, "{boundary:"+trustBoundary.Id+"}")
			r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
			r.tocLinkIdByAssetId[trustBoundary.Id] = r.pdf.AddLink()
			r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[trustBoundary.Id])
		}
		r.pdfColorBlack()
	}

	// ===============

	// Section: shared runtimes.
	if len(parsedModel.SharedRuntimes) > 0 {
		y += 6
		y += 6
		if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
			r.pageBreakInLists()
			y = 40
		}
		r.pdf.SetFont("Helvetica", "B", fontSizeBody)
		r.pdfColorBlack()
		r.pdf.Text(11, y, "Shared Runtime")
		r.pdf.SetFont("Helvetica", "", fontSizeBody)
		for _, key := range types.SortedKeysOfSharedRuntime(parsedModel) {
			sharedRuntime := parsedModel.SharedRuntimes[key]
			y += 6
			if y > 275 {
				r.pageBreakInLists()
				y = 40
			}
			r.pdf.Text(11, y, " "+uni(sharedRuntime.Title))
			r.pdf.Text(175, y, "{runtime:"+sharedRuntime.Id+"}")
			r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
			r.tocLinkIdByAssetId[sharedRuntime.Id] = r.pdf.AddLink()
			r.pdf.Link(10, y-5, 172.5, 6.5, r.tocLinkIdByAssetId[sharedRuntime.Id])
		}
	}

	// ===============

	// Final section: about Threagile (risk rules checked + disclaimer).
	y += 6
	y += 6
	if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
		r.pageBreakInLists()
		y = 40
	}
	r.pdfColorBlack()
	r.pdf.SetFont("Helvetica", "B", fontSizeBody)
	r.pdf.Text(11, y, "About Threagile")
	r.pdf.SetFont("Helvetica", "", fontSizeBody)
	y += 6
	if y > 275 {
		r.pageBreakInLists()
		y = 40
	}
	r.pdf.Text(11, y, " "+"Risk Rules Checked by Threagile")
	r.pdf.Text(175, y, "{risk-rules-checked}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
	y += 6
	if y > 275 {
		r.pageBreakInLists()
		y = 40
	}
	r.pdfColorDisclaimer()
	r.pdf.Text(11, y, " "+"Disclaimer")
	r.pdf.Text(175, y, "{disclaimer}")
	r.pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
	r.pdf.Link(10, y-5, 172.5, 6.5, r.pdf.AddLink())
	r.pdfColorBlack()

	// Restore solid black drawing for subsequent pages.
	r.pdf.SetDrawColor(0, 0, 0)
	r.pdf.SetDashPattern([]float64{}, 0)

	// Now write all the sections/pages. Before we start writing, we use `RegisterAlias` to
	// ensure that the alias written in the table of contents will be replaced
	// by the current page number. --> See the "r.pdf.RegisterAlias()" calls during the PDF creation in this file
}
+
+func sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset {
+ assets := make([]types.TechnicalAsset, 0)
+ for _, asset := range parsedModel.TechnicalAssets {
+ assets = append(assets, asset)
+ }
+ types.SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk(assets, parsedModel)
+ return assets
+}
+
+func sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel *types.ParsedModel) []types.DataAsset {
+ assets := make([]types.DataAsset, 0)
+ for _, asset := range parsedModel.DataAssets {
+ assets = append(assets, asset)
+ }
+
+ types.SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk(parsedModel, assets)
+ return assets
+}
+
// defineLinkTarget registers the given alias (e.g. "{disclaimer}") to resolve
// to the current page number — left-padded so all TOC page numbers align —
// and anchors the next pending internal link on this page.
// NOTE(review): the padding widths for 1- and 2-digit page numbers look
// identical here; the original likely used two spaces vs. one — confirm
// against the upstream source (whitespace may have been collapsed).
func (r *pdfReporter) defineLinkTarget(alias string) {
	pageNumbStr := strconv.Itoa(r.pdf.PageNo())
	if len(pageNumbStr) == 1 {
		pageNumbStr = " " + pageNumbStr
	} else if len(pageNumbStr) == 2 {
		pageNumbStr = " " + pageNumbStr
	}
	r.pdf.RegisterAlias(alias, pageNumbStr)
	// Anchor the link created earlier via AddLink to the current position.
	r.pdf.SetLink(r.linkCounter, 0, -1)
	r.linkCounter++
}
+
+func (r *pdfReporter) createDisclaimer(parsedModel *types.ParsedModel) {
+ r.pdf.AddPage()
+ r.currentChapterTitleBreadcrumb = "Disclaimer"
+ r.defineLinkTarget("{disclaimer}")
+ gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300)
+ r.pdfColorDisclaimer()
+ r.pdf.SetFont("Helvetica", "B", fontSizeHeadline)
+ r.pdf.Text(11, 40, "Disclaimer")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetY(46)
+
+ var disclaimer strings.Builder
+ disclaimer.WriteString(parsedModel.Author.Name + " conducted this threat analysis using the open-source Threagile toolkit " +
+ "on the applications and systems that were modeled as of this report's date. " +
+ "Information security threats are continually changing, with new " +
+ "vulnerabilities discovered on a daily basis, and no application can ever be 100% secure no matter how much " +
+ "threat modeling is conducted. It is recommended to execute threat modeling and also penetration testing on a regular basis " +
+ "(for example yearly) to ensure a high ongoing level of security and constantly check for new attack vectors. " +
+ "
" +
+ "This report cannot and does not protect against personal or business loss as the result of use of the " +
+ "applications or systems described. " + parsedModel.Author.Name + " and the Threagile toolkit offers no warranties, representations or " +
+ "legal certifications concerning the applications or systems it tests. All software includes defects: nothing " +
+ "in this document is intended to represent or warrant that threat modeling was complete and without error, " +
+ "nor does this document represent or warrant that the architecture analyzed is suitable to task, free of other " +
+ "defects than reported, fully compliant with any industry standards, or fully compatible with any operating " +
+ "system, hardware, or other application. Threat modeling tries to analyze the modeled architecture without " +
+ "having access to a real working system and thus cannot and does not test the implementation for defects and vulnerabilities. " +
+ "These kinds of checks would only be possible with a separate code review and penetration test against " +
+ "a working system and not via a threat model." +
+ "
" +
+ "By using the resulting information you agree that " + parsedModel.Author.Name + " and the Threagile toolkit " +
+ "shall be held harmless in any event." +
+ "
" +
+ "This report is confidential and intended for internal, confidential use by the client. The recipient " +
+ "is obligated to ensure the highly confidential contents are kept secret. The recipient assumes responsibility " +
+ "for further distribution of this document." +
+ "
" +
+ "In this particular project, a time box approach was used to define the analysis effort. This means that the " +
+ "author allotted a prearranged amount of time to identify and document threats. Because of this, there " +
+ "is no guarantee that all possible threats and risks are discovered. Furthermore, the analysis " +
+ "applies to a snapshot of the current state of the modeled architecture (based on the architecture information provided " +
+ "by the customer) at the examination time." +
+ "
" +
+ "Report Distribution" +
+ "
" +
+ "Distribution of this report (in full or in part like diagrams or risk findings) requires that this disclaimer " +
+ "as well as the chapter about the Threagile toolkit and method used is kept intact as part of the " +
+ "distributed report or referenced from the distributed parts.")
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, disclaimer.String())
+ r.pdfColorBlack()
+}
+
+func (r *pdfReporter) createManagementSummary(parsedModel *types.ParsedModel, tempFolder string) error {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Management Summary"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{management-summary}")
+ r.currentChapterTitleBreadcrumb = title
+ countCritical := len(types.FilteredByOnlyCriticalRisks(parsedModel))
+ countHigh := len(types.FilteredByOnlyHighRisks(parsedModel))
+ countElevated := len(types.FilteredByOnlyElevatedRisks(parsedModel))
+ countMedium := len(types.FilteredByOnlyMediumRisks(parsedModel))
+ countLow := len(types.FilteredByOnlyLowRisks(parsedModel))
+
+ countStatusUnchecked := len(types.FilteredByRiskTrackingUnchecked(parsedModel))
+ countStatusInDiscussion := len(types.FilteredByRiskTrackingInDiscussion(parsedModel))
+ countStatusAccepted := len(types.FilteredByRiskTrackingAccepted(parsedModel))
+ countStatusInProgress := len(types.FilteredByRiskTrackingInProgress(parsedModel))
+ countStatusMitigated := len(types.FilteredByRiskTrackingMitigated(parsedModel))
+ countStatusFalsePositive := len(types.FilteredByRiskTrackingFalsePositive(parsedModel))
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "Threagile toolkit was used to model the architecture of \""+uni(parsedModel.Title)+"\" "+
+ "and derive risks by analyzing the components and data flows. The risks identified during this analysis are shown "+
+ "in the following chapters. Identified risks during threat modeling do not necessarily mean that the "+
+ "vulnerability associated with this risk actually exists: it is more to be seen as a list of potential risks and "+
+ "threats, which should be individually reviewed and reduced by removing false positives. For the remaining risks it should "+
+ "be checked in the design and implementation of \""+uni(parsedModel.Title)+"\" whether the mitigation advices "+
+ "have been applied or not."+
+ "
"+
+ "Each risk finding references a chapter of the OWASP ASVS (Application Security Verification Standard) audit checklist. "+
+ "The OWASP ASVS checklist should be considered as an inspiration by architects and developers to further harden "+
+ "the application in a Defense-in-Depth approach. Additionally, for each risk finding a "+
+ "link towards a matching OWASP Cheat Sheet or similar with technical details about how to implement a mitigation is given."+
+ "
"+
+ "In total "+strconv.Itoa(types.TotalRiskCount(parsedModel))+" initial risks in "+strconv.Itoa(len(parsedModel.GeneratedRisksByCategory))+" categories have "+
+ "been identified during the threat modeling process:
") // TODO plural singular stuff risk/s category/ies has/have
+
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "")
+ ColorRiskStatusUnchecked(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+
+ ColorCriticalRisk(r.pdf)
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "critical risk", "0", 0, "", false, 0, "")
+ ColorRiskStatusInDiscussion(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+
+ ColorHighRisk(r.pdf)
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "high risk", "0", 0, "", false, 0, "")
+ ColorRiskStatusAccepted(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+
+ ColorElevatedRisk(r.pdf)
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "elevated risk", "0", 0, "", false, 0, "")
+ ColorRiskStatusInProgress(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+
+ ColorMediumRisk(r.pdf)
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "medium risk", "0", 0, "", false, 0, "")
+ ColorRiskStatusMitigated(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "")
+ r.pdf.SetFont("Helvetica", "BI", fontSizeBody)
+ r.pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(-1)
+
+ ColorLowRisk(r.pdf)
+ r.pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "low risk", "0", 0, "", false, 0, "")
+ ColorRiskStatusFalsePositive(r.pdf)
+ r.pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "")
+ r.pdf.SetFont("Helvetica", "BI", fontSizeBody)
+ r.pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(-1)
+
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ // pie chart: risk severity
+ pieChartRiskSeverity := chart.PieChart{
+ Width: 1500,
+ Height: 1500,
+ Values: []chart.Value{
+ {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorLowRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorLowRisk()),
+ FontSize: 65}},
+ {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorMediumRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorMediumRisk()),
+ FontSize: 65}},
+ {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorElevatedRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorElevatedRisk()),
+ FontSize: 65}},
+ {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorHighRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorHighRisk()),
+ FontSize: 65}},
+ {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorCriticalRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorCriticalRisk()),
+ FontSize: 65}},
+ },
+ }
+
+ // pie chart: risk status
+ pieChartRiskStatus := chart.PieChart{
+ Width: 1500,
+ Height: 1500,
+ Values: []chart.Value{
+ {Value: float64(countStatusFalsePositive), //Label: strconv.Itoa(countStatusFalsePositive) + " False Positive",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusFalsePositive()),
+ FontSize: 65}},
+ {Value: float64(countStatusMitigated), //Label: strconv.Itoa(countStatusMitigated) + " Mitigated",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusMitigated()),
+ FontSize: 65}},
+ {Value: float64(countStatusInProgress), //Label: strconv.Itoa(countStatusInProgress) + " InProgress",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusInProgress()),
+ FontSize: 65}},
+ {Value: float64(countStatusAccepted), //Label: strconv.Itoa(countStatusAccepted) + " Accepted",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusAccepted()),
+ FontSize: 65}},
+ {Value: float64(countStatusInDiscussion), //Label: strconv.Itoa(countStatusInDiscussion) + " InDiscussion",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusInDiscussion()),
+ FontSize: 65}},
+ {Value: float64(countStatusUnchecked), //Label: strconv.Itoa(countStatusUnchecked) + " Unchecked",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorRiskStatusUnchecked()),
+ FontSize: 65}},
+ },
+ }
+
+ y := r.pdf.GetY() + 5
+ err := r.embedPieChart(pieChartRiskSeverity, 15.0, y, tempFolder)
+ if err != nil {
+ return fmt.Errorf("unable to embed pie chart: %w", err)
+ }
+
+ err = r.embedPieChart(pieChartRiskStatus, 110.0, y, tempFolder)
+ if err != nil {
+ return fmt.Errorf("unable to embed pie chart: %w", err)
+ }
+
+ // individual management summary comment
+ r.pdfColorBlack()
+ if len(parsedModel.ManagementSummaryComment) > 0 {
+ html.Write(5, "
"+
+ parsedModel.ManagementSummaryComment)
+ }
+ return nil
+}
+
+func (r *pdfReporter) createRiskMitigationStatus(parsedModel *types.ParsedModel, tempFolder string) error {
+ r.pdf.SetTextColor(0, 0, 0)
+ stillAtRisk := types.FilteredByStillAtRisk(parsedModel)
+ count := len(stillAtRisk)
+ title := "Risk Mitigation"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{risk-mitigation-status}")
+ r.currentChapterTitleBreadcrumb = title
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "The following chart gives a high-level overview of the risk tracking status (including mitigated risks):")
+
+ risksCritical := types.FilteredByOnlyCriticalRisks(parsedModel)
+ risksHigh := types.FilteredByOnlyHighRisks(parsedModel)
+ risksElevated := types.FilteredByOnlyElevatedRisks(parsedModel)
+ risksMedium := types.FilteredByOnlyMediumRisks(parsedModel)
+ risksLow := types.FilteredByOnlyLowRisks(parsedModel)
+
+ countStatusUnchecked := len(types.FilteredByRiskTrackingUnchecked(parsedModel))
+ countStatusInDiscussion := len(types.FilteredByRiskTrackingInDiscussion(parsedModel))
+ countStatusAccepted := len(types.FilteredByRiskTrackingAccepted(parsedModel))
+ countStatusInProgress := len(types.FilteredByRiskTrackingInProgress(parsedModel))
+ countStatusMitigated := len(types.FilteredByRiskTrackingMitigated(parsedModel))
+ countStatusFalsePositive := len(types.FilteredByRiskTrackingFalsePositive(parsedModel))
+
+ stackedBarChartRiskTracking := chart.StackedBarChart{
+ Width: 4000,
+ //Height: 2500,
+ XAxis: chart.Style{Show: false, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom},
+ YAxis: chart.Style{Show: true, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom},
+ Bars: []chart.StackedBar{
+ {
+ Name: types.LowSeverity.Title(),
+ Width: 130,
+ Values: []chart.Value{
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksLow))), Label: types.Unchecked.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksLow))), Label: types.InDiscussion.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksLow))), Label: types.Accepted.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksLow))), Label: types.InProgress.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksLow))), Label: types.Mitigated.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksLow))), Label: types.FalsePositive.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ },
+ },
+ {
+ Name: types.MediumSeverity.Title(),
+ Width: 130,
+ Values: []chart.Value{
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksMedium))), Label: types.Unchecked.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksMedium))), Label: types.InDiscussion.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksMedium))), Label: types.Accepted.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksMedium))), Label: types.InProgress.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksMedium))), Label: types.Mitigated.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksMedium))), Label: types.FalsePositive.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ },
+ },
+ {
+ Name: types.ElevatedSeverity.Title(),
+ Width: 130,
+ Values: []chart.Value{
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksElevated))), Label: types.Unchecked.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksElevated))), Label: types.InDiscussion.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksElevated))), Label: types.Accepted.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksElevated))), Label: types.InProgress.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksElevated))), Label: types.Mitigated.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksElevated))), Label: types.FalsePositive.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ },
+ },
+ {
+ Name: types.HighSeverity.Title(),
+ Width: 130,
+ Values: []chart.Value{
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksHigh))), Label: types.Unchecked.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksHigh))), Label: types.InDiscussion.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksHigh))), Label: types.Accepted.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksHigh))), Label: types.InProgress.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksHigh))), Label: types.Mitigated.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksHigh))), Label: types.FalsePositive.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ },
+ },
+ {
+ Name: types.CriticalSeverity.Title(),
+ Width: 130,
+ Values: []chart.Value{
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingUnchecked(parsedModel, risksCritical))), Label: types.Unchecked.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInDiscussion(parsedModel, risksCritical))), Label: types.InDiscussion.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingAccepted(parsedModel, risksCritical))), Label: types.Accepted.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingInProgress(parsedModel, risksCritical))), Label: types.InProgress.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingMitigated(parsedModel, risksCritical))), Label: types.Mitigated.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ {Value: float64(len(types.ReduceToOnlyRiskTrackingFalsePositive(parsedModel, risksCritical))), Label: types.FalsePositive.Title(),
+ Style: chart.Style{FillColor: makeColor(RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
+ },
+ },
+ },
+ }
+
+ y := r.pdf.GetY() + 12
+ err := r.embedStackedBarChart(stackedBarChartRiskTracking, 15.0, y, tempFolder)
+ if err != nil {
+ return err
+ }
+
+ // draw the X-Axis legend on my own
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorBlack()
+ r.pdf.Text(24.02, 169, "Low ("+strconv.Itoa(len(risksLow))+")")
+ r.pdf.Text(46.10, 169, "Medium ("+strconv.Itoa(len(risksMedium))+")")
+ r.pdf.Text(69.74, 169, "Elevated ("+strconv.Itoa(len(risksElevated))+")")
+ r.pdf.Text(97.95, 169, "High ("+strconv.Itoa(len(risksHigh))+")")
+ r.pdf.Text(121.65, 169, "Critical ("+strconv.Itoa(len(risksCritical))+")")
+
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(20)
+
+ ColorRiskStatusUnchecked(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorRiskStatusInDiscussion(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorRiskStatusAccepted(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorRiskStatusInProgress(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorRiskStatusMitigated(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "")
+ r.pdf.SetFont("Helvetica", "BI", fontSizeBody)
+ r.pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(-1)
+ ColorRiskStatusFalsePositive(r.pdf)
+ r.pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "")
+ r.pdf.SetFont("Helvetica", "BI", fontSizeBody)
+ r.pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(-1)
+
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ r.pdfColorBlack()
+ if count == 0 {
+ html.Write(5, "
"+
+ "After removal of risks with status mitigated and false positive "+
+ ""+strconv.Itoa(count)+" remain unmitigated.")
+ } else {
+ html.Write(5, "
"+
+ "After removal of risks with status mitigated and false positive "+
+ "the following "+strconv.Itoa(count)+" remain unmitigated:")
+
+ countCritical := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyCriticalRisks(parsedModel)))
+ countHigh := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyHighRisks(parsedModel)))
+ countElevated := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyElevatedRisks(parsedModel)))
+ countMedium := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyMediumRisks(parsedModel)))
+ countLow := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyLowRisks(parsedModel)))
+
+ countBusinessSide := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyBusinessSide(parsedModel)))
+ countArchitecture := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyArchitecture(parsedModel)))
+ countDevelopment := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyDevelopment(parsedModel)))
+ countOperation := len(types.ReduceToOnlyStillAtRisk(parsedModel, types.FilteredByOnlyOperation(parsedModel)))
+
+ pieChartRemainingRiskSeverity := chart.PieChart{
+ Width: 1500,
+ Height: 1500,
+ Values: []chart.Value{
+ {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorLowRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorLowRisk()),
+ FontSize: 65}},
+ {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorMediumRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorMediumRisk()),
+ FontSize: 65}},
+ {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorElevatedRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorElevatedRisk()),
+ FontSize: 65}},
+ {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorHighRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorHighRisk()),
+ FontSize: 65}},
+ {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical",
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorCriticalRisk()).WithAlpha(98),
+ //FontColor: makeColor(RgbHexColorCriticalRisk()),
+ FontSize: 65}},
+ },
+ }
+
+ pieChartRemainingRisksByFunction := chart.PieChart{
+ Width: 1500,
+ Height: 1500,
+ Values: []chart.Value{
+ {Value: float64(countBusinessSide),
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorBusiness()).WithAlpha(98),
+ FontSize: 65}},
+ {Value: float64(countArchitecture),
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorArchitecture()).WithAlpha(98),
+ FontSize: 65}},
+ {Value: float64(countDevelopment),
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorDevelopment()).WithAlpha(98),
+ FontSize: 65}},
+ {Value: float64(countOperation),
+ Style: chart.Style{
+ FillColor: makeColor(RgbHexColorOperation()).WithAlpha(98),
+ FontSize: 65}},
+ },
+ }
+
+ _ = r.embedPieChart(pieChartRemainingRiskSeverity, 15.0, 216, tempFolder)
+ _ = r.embedPieChart(pieChartRemainingRisksByFunction, 110.0, 216, tempFolder)
+
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.Ln(8)
+
+ ColorCriticalRisk(r.pdf)
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unmitigated critical risk", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, "", "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorHighRisk(r.pdf)
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unmitigated high risk", "0", 0, "", false, 0, "")
+ ColorBusiness(r.pdf)
+ r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countBusinessSide), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "business side related", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorElevatedRisk(r.pdf)
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unmitigated elevated risk", "0", 0, "", false, 0, "")
+ ColorArchitecture(r.pdf)
+ r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countArchitecture), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "architecture related", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorMediumRisk(r.pdf)
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unmitigated medium risk", "0", 0, "", false, 0, "")
+ ColorDevelopment(r.pdf)
+ r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countDevelopment), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "development related", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ ColorLowRisk(r.pdf)
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "unmitigated low risk", "0", 0, "", false, 0, "")
+ ColorOperation(r.pdf)
+ r.pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(10, 6, strconv.Itoa(countOperation), "0", 0, "R", false, 0, "")
+ r.pdf.CellFormat(60, 6, "operations related", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ }
+ return nil
+}
+
+// CAUTION: Long labels might cause endless loop, then remove labels and render them manually later inside the PDF
+func (r *pdfReporter) embedStackedBarChart(sbcChart chart.StackedBarChart, x float64, y float64, tempFolder string) error {
+ tmpFilePNG, err := os.CreateTemp(tempFolder, "chart-*-.png")
+ if err != nil {
+ return fmt.Errorf("error creating temporary file for chart: %w", err)
+ }
+ defer func() { _ = os.Remove(tmpFilePNG.Name()) }()
+ file, _ := os.Create(tmpFilePNG.Name())
+ defer func() { _ = file.Close() }()
+ err = sbcChart.Render(chart.PNG, file)
+ if err != nil {
+ return fmt.Errorf("error rendering chart: %w", err)
+ }
+ var options gofpdf.ImageOptions
+ options.ImageType = ""
+ r.pdf.RegisterImage(tmpFilePNG.Name(), "")
+ r.pdf.ImageOptions(tmpFilePNG.Name(), x, y, 0, 110, false, options, 0, "")
+ return nil
+}
+
+func (r *pdfReporter) embedPieChart(pieChart chart.PieChart, x float64, y float64, tempFolder string) error {
+ tmpFilePNG, err := os.CreateTemp(tempFolder, "chart-*-.png")
+ if err != nil {
+ return fmt.Errorf("error creating temporary file for chart: %w", err)
+ }
+ defer func() { _ = os.Remove(tmpFilePNG.Name()) }()
+ file, err := os.Create(tmpFilePNG.Name())
+ if err != nil {
+ return fmt.Errorf("error creating temporary file for chart: %w", err)
+ }
+ defer func() { _ = file.Close() }()
+ err = pieChart.Render(chart.PNG, file)
+ if err != nil {
+ return fmt.Errorf("error rendering chart: %w", err)
+ }
+ var options gofpdf.ImageOptions
+ options.ImageType = ""
+ r.pdf.RegisterImage(tmpFilePNG.Name(), "")
+ r.pdf.ImageOptions(tmpFilePNG.Name(), x, y, 60, 0, false, options, 0, "")
+ return nil
+}
+
+func makeColor(hexColor string) drawing.Color {
+ _, i := utf8.DecodeRuneInString(hexColor)
+ return drawing.ColorFromHex(hexColor[i:]) // = remove first char, which is # in rgb hex here
+}
+
// createImpactInitialRisks renders the impact-analysis chapter for the
// initial (pre-mitigation) risks by delegating to renderImpactAnalysis.
func (r *pdfReporter) createImpactInitialRisks(parsedModel *types.ParsedModel) {
	r.renderImpactAnalysis(parsedModel, true)
}
+
// createImpactRemainingRisks renders the impact-analysis chapter for the
// remaining (still unmitigated) risks by delegating to renderImpactAnalysis.
func (r *pdfReporter) createImpactRemainingRisks(parsedModel *types.ParsedModel) {
	r.renderImpactAnalysis(parsedModel, false)
}
+
+func (r *pdfReporter) renderImpactAnalysis(parsedModel *types.ParsedModel, initialRisks bool) {
+ r.pdf.SetTextColor(0, 0, 0)
+ count, catCount := types.TotalRiskCount(parsedModel), len(parsedModel.GeneratedRisksByCategory)
+ if !initialRisks {
+ count, catCount = len(types.FilteredByStillAtRisk(parsedModel)), len(types.CategoriesOfOnlyRisksStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory))
+ }
+ riskStr, catStr := "Risks", "Categories"
+ if count == 1 {
+ riskStr = "Risk"
+ }
+ if catCount == 1 {
+ catStr = "Category"
+ }
+ if initialRisks {
+ chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Initial " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{impact-analysis-initial-risks}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+ } else {
+ chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Remaining " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{impact-analysis-remaining-risks}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+ }
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ riskStr = "risks"
+ if count == 1 {
+ riskStr = "risk"
+ }
+ initialStr := "initial"
+ if !initialRisks {
+ initialStr = "remaining"
+ }
+ strBuilder.WriteString("The most prevalent impacts of the " + strconv.Itoa(count) + " " +
+ initialStr + " " + riskStr + " (distributed over " + strconv.Itoa(catCount) + " risk categories) are " +
+ "(taking the severity ratings into account and using the highest for each category):
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)),
+ types.CriticalSeverity, false, initialRisks, true, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)),
+ types.HighSeverity, false, initialRisks, true, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)),
+ types.ElevatedSeverity, false, initialRisks, true, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)),
+ types.MediumSeverity, false, initialRisks, true, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, parsedModel.GeneratedRisksByCategory, initialRisks)),
+ types.LowSeverity, false, initialRisks, true, false)
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+func (r *pdfReporter) createOutOfScopeAssets(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ assets := "Assets"
+ count := len(parsedModel.OutOfScopeTechnicalAssets())
+ if count == 1 {
+ assets = "Asset"
+ }
+ chapTitle := "Out-of-Scope Assets: " + strconv.Itoa(count) + " " + assets
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{out-of-scope-assets}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ strBuilder.WriteString("This chapter lists all technical assets that have been defined as out-of-scope. " +
+ "Each one should be checked in the model whether it should better be included in the " +
+ "overall risk analysis:
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ outOfScopeAssetCount := 0
+ for _, technicalAsset := range sortedTechnicalAssetsByRAAAndTitle(parsedModel) {
+ if technicalAsset.OutOfScope {
+ outOfScopeAssetCount++
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ strBuilder.WriteString("
")
+ }
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ posY := r.pdf.GetY()
+ r.pdfColorOutOfScope()
+ strBuilder.WriteString("")
+ strBuilder.WriteString(uni(technicalAsset.Title))
+ strBuilder.WriteString("")
+ strBuilder.WriteString(": out-of-scope")
+ strBuilder.WriteString("
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+ strBuilder.WriteString(uni(technicalAsset.JustificationOutOfScope))
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[technicalAsset.Id])
+ }
+ }
+
+ if outOfScopeAssetCount == 0 {
+ r.pdfColorGray()
+ html.Write(5, "
No technical assets have been defined as out-of-scope.")
+ }
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+func sortedTechnicalAssetsByRAAAndTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset {
+ assets := make([]types.TechnicalAsset, 0)
+ for _, asset := range parsedModel.TechnicalAssets {
+ assets = append(assets, asset)
+ }
+ sort.Sort(types.ByTechnicalAssetRAAAndTitleSort(assets))
+ return assets
+}
+
+func (r *pdfReporter) createModelFailures(parsedModel *types.ParsedModel) {
+ r.pdf.SetTextColor(0, 0, 0)
+ modelFailures := types.FlattenRiskSlice(types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory))
+ risksStr := "Risks"
+ count := len(modelFailures)
+ if count == 1 {
+ risksStr = "Risk"
+ }
+ countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, modelFailures))
+ if countStillAtRisk > 0 {
+ ColorModelFailure(r.pdf)
+ }
+ chapTitle := "Potential Model Failures: " + strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(count) + " " + risksStr
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{model-failures}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+ r.pdfColorBlack()
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ strBuilder.WriteString("This chapter lists potential model failures where not all relevant assets have been " +
+ "modeled or the model might itself contain inconsistencies. Each potential model failure should be checked " +
+ "in the model against the architecture design:
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ modelFailuresByCategory := types.FilterByModelFailures(parsedModel, parsedModel.GeneratedRisksByCategory)
+ if len(modelFailuresByCategory) == 0 {
+ r.pdfColorGray()
+ html.Write(5, "
No potential model failures have been identified.")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, modelFailuresByCategory, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, modelFailuresByCategory, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, modelFailuresByCategory, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, modelFailuresByCategory, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, modelFailuresByCategory, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+// createRAA renders the "RAA Analysis" chapter of the PDF report.
+//
+// It writes the given introductory text (introTextRAA), then lists every
+// in-scope technical asset sorted by RAA value and title. Each entry shows
+// the asset title with its RAA percentage, colored by the highest severity
+// among the asset's risks that are still at risk, followed by the asset
+// description. Each paragraph is made clickable, linking to the asset's
+// detail chapter via r.tocLinkIdByAssetId.
+func (r *pdfReporter) createRAA(parsedModel *types.ParsedModel, introTextRAA string) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ chapTitle := "RAA Analysis"
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{raa-analysis}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ strBuilder.WriteString(introTextRAA)
+ strBuilder.WriteString("
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ for _, technicalAsset := range sortedTechnicalAssetsByRAAAndTitle(parsedModel) {
+ if technicalAsset.OutOfScope {
+ continue
+ }
+ // Start a fresh page when close to the bottom margin; otherwise just
+ // emit vertical spacing before the next entry.
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ strBuilder.WriteString("
")
+ }
+ newRisksStr := technicalAsset.GeneratedRisks(parsedModel)
+ // Color the asset title by the highest severity still at risk.
+ switch types.HighestSeverityStillAtRisk(parsedModel, newRisksStr) {
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ // Nothing left at risk: render in plain black regardless of severity.
+ if len(types.ReduceToOnlyStillAtRisk(parsedModel, newRisksStr)) == 0 {
+ r.pdfColorBlack()
+ }
+
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ posY := r.pdf.GetY()
+ strBuilder.WriteString("")
+ strBuilder.WriteString(uni(technicalAsset.Title))
+ strBuilder.WriteString("")
+ // NOTE(review): this branch is unreachable — out-of-scope assets are
+ // already skipped by the `continue` at the top of the loop; confirm
+ // and consider removing the dead "out-of-scope" case.
+ if technicalAsset.OutOfScope {
+ strBuilder.WriteString(": out-of-scope")
+ } else {
+ strBuilder.WriteString(": RAA ")
+ strBuilder.WriteString(fmt.Sprintf("%.0f", technicalAsset.RAA))
+ strBuilder.WriteString("%")
+ }
+ strBuilder.WriteString("
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+ strBuilder.WriteString(uni(technicalAsset.Description))
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ // Make the whole paragraph area a clickable link to the asset chapter.
+ r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[technicalAsset.Id])
+ }
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+/*
+func createDataRiskQuickWins() {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ assets := "assets"
+ count := len(model.SortedTechnicalAssetsByQuickWinsAndTitle())
+ if count == 1 {
+ assets = "asset"
+ }
+ chapTitle := "Data Risk Quick Wins: " + strconv.Itoa(count) + " " + assets
+ r.addHeadline(chapTitle, false)
+ defineLinkTarget("{data-risk-quick-wins}")
+ currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ strBuilder.WriteString("For each technical asset it was checked how many data assets at risk might " +
+ "get their risk-rating reduced (partly or fully) when the risks of the technical asset are mitigated. " +
+ "In general, that means the higher the quick win value is, the more data assets (left side of the Data Risk Mapping diagram) " +
+ "turn from red to amber or from amber to blue by mitigating the technical asset's risks. " +
+ "This list can be used to prioritize on efforts with the greatest effects of reducing data asset risks:
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ for _, technicalAsset := range model.SortedTechnicalAssetsByQuickWinsAndTitle() {
+ quickWins := technicalAsset.QuickWins()
+ if r.pdf.GetY() > 260 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ strBuilder.WriteString("
")
+ }
+ risks := technicalAsset.GeneratedRisks()
+ switch model.HighestSeverityStillAtRisk(risks) {
+ case model.High:
+ ColorHighRisk(r.pdf)
+ case model.Medium:
+ ColorMediumRisk(r.pdf)
+ case model.Low:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
+ r.pdfColorBlack()
+ }
+
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ posY := r.pdf.GetY()
+ strBuilder.WriteString("")
+ strBuilder.WriteString(uni(technicalAsset.Title))
+ strBuilder.WriteString("")
+ strBuilder.WriteString(": ")
+ strBuilder.WriteString(fmt.Sprintf("%.2f", quickWins))
+ strBuilder.WriteString(" Quick Wins")
+ strBuilder.WriteString("
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+ strBuilder.WriteString(uni(technicalAsset.Description))
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id])
+ }
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+*/
+
+// addCategories writes one linked summary paragraph per risk category for a
+// given severity bucket.
+//
+// Parameters:
+//   - riskCategories: the categories to render; sorted by title internally.
+//   - severity: the severity bucket being rendered; selects the color and the
+//     "Critical: "/"High: "/... prefix of each paragraph heading.
+//   - bothInitialAndRemainingRisks: when true, the count is shown as
+//     "remaining / initial" instead of a single number.
+//   - initialRisks: when true, counts and likelihood/impact are based on all
+//     generated risks; when false, only on risks still at risk (categories
+//     with no remaining risks are then skipped entirely).
+//   - describeImpact / describeDescription: choose which category text is
+//     shown below the heading (Impact, Description, or Mitigation as the
+//     fallback when both are false).
+//
+// Each rendered paragraph links to the category's detail chapter via
+// r.tocLinkIdByAssetId.
+func (r *pdfReporter) addCategories(parsedModel *types.ParsedModel, riskCategories []types.RiskCategory, severity types.RiskSeverity, bothInitialAndRemainingRisks bool, initialRisks bool, describeImpact bool, describeDescription bool) {
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ sort.Sort(types.ByRiskCategoryTitleSort(riskCategories))
+ for _, riskCategory := range riskCategories {
+ risksStr := parsedModel.GeneratedRisksByCategory[riskCategory.Id]
+ if !initialRisks {
+ risksStr = types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)
+ }
+ if len(risksStr) == 0 {
+ continue
+ }
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ strBuilder.WriteString("
")
+ }
+ // Color and prefix according to the severity bucket being rendered.
+ var prefix string
+ switch severity {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ prefix = "Critical: "
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ prefix = "High: "
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ prefix = "Elevated: "
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ prefix = "Medium: "
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ prefix = "Low: "
+ default:
+ r.pdfColorBlack()
+ prefix = ""
+ }
+ // Re-color by the highest severity still at risk within this category
+ // (may differ from the bucket when risks were partially mitigated).
+ switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ }
+ // Nothing left at risk: render in plain black.
+ if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 {
+ r.pdfColorBlack()
+ }
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ posY := r.pdf.GetY()
+ strBuilder.WriteString(prefix)
+ strBuilder.WriteString("")
+ strBuilder.WriteString(riskCategory.Title)
+ strBuilder.WriteString(": ")
+ count := len(risksStr)
+ initialStr := "Initial"
+ if !initialRisks {
+ initialStr = "Remaining"
+ }
+ remainingRisks := types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)
+ suffix := strconv.Itoa(count) + " " + initialStr + " Risk"
+ if bothInitialAndRemainingRisks {
+ suffix = strconv.Itoa(len(remainingRisks)) + " / " + strconv.Itoa(count) + " Risk"
+ }
+ if count != 1 {
+ suffix += "s"
+ }
+ suffix += " - Exploitation likelihood is "
+ if initialRisks {
+ suffix += types.HighestExploitationLikelihood(risksStr).Title() + " with " + types.HighestExploitationImpact(risksStr).Title() + " impact."
+ } else {
+ suffix += types.HighestExploitationLikelihood(remainingRisks).Title() + " with " + types.HighestExploitationImpact(remainingRisks).Title() + " impact."
+ }
+ strBuilder.WriteString(suffix + "
")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+ if describeImpact {
+ strBuilder.WriteString(firstParagraph(riskCategory.Impact))
+ } else if describeDescription {
+ strBuilder.WriteString(firstParagraph(riskCategory.Description))
+ } else {
+ strBuilder.WriteString(firstParagraph(riskCategory.Mitigation))
+ }
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ // Make the whole paragraph clickable, linking to the category chapter.
+ r.pdf.Link(9, posY, 190, r.pdf.GetY()-posY+4, r.tocLinkIdByAssetId[riskCategory.Id])
+ }
+}
+
+// firstParagraph returns the text up to (and excluding) the first paragraph
+// separator; if no separator matches, the full text is returned unchanged.
+// NOTE(review): the separator pattern appears garbled here — it presumably
+// matched HTML break/paragraph markup originally; confirm against upstream.
+// NOTE(review): the regexp is compiled on every call; it could be hoisted to
+// a package-level `var ... = regexp.MustCompile(...)`.
+func firstParagraph(text string) string {
+ firstParagraphRegEx := regexp.MustCompile(`(.*?)((
)|())`)
+ match := firstParagraphRegEx.FindStringSubmatch(text)
+ if len(match) == 0 {
+ return text
+ }
+ return match[1]
+}
+
+// createAssignmentByFunction renders the "Assignment by Function" chapter:
+// all identified risks are clustered by the organizational function most
+// likely able to check and mitigate them (business side, architecture,
+// development, operations). Each cluster lists its risk categories from
+// critical down to low severity via addCategories, indented under a function
+// heading, or "n/a" in gray when the cluster is empty.
+func (r *pdfReporter) createAssignmentByFunction(parsedModel *types.ParsedModel) {
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Assignment by Function"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{function-assignment}")
+ r.currentChapterTitleBreadcrumb = title
+
+ risksBusinessSideFunction := types.RisksOfOnlyBusinessSide(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksArchitectureFunction := types.RisksOfOnlyArchitecture(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksDevelopmentFunction := types.RisksOfOnlyDevelopment(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksOperationFunction := types.RisksOfOnlyOperation(parsedModel, parsedModel.GeneratedRisksByCategory)
+
+ countBusinessSideFunction := types.CountRisks(risksBusinessSideFunction)
+ countArchitectureFunction := types.CountRisks(risksArchitectureFunction)
+ countDevelopmentFunction := types.CountRisks(risksDevelopmentFunction)
+ countOperationFunction := types.CountRisks(risksOperationFunction)
+ var intro strings.Builder
+ intro.WriteString("This chapter clusters and assigns the risks by functions which are most likely able to " +
+ "check and mitigate them: " +
+ "In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " +
+ "of which " + strconv.Itoa(countBusinessSideFunction) + " should be checked by " + types.BusinessSide.Title() + ", " +
+ "" + strconv.Itoa(countArchitectureFunction) + " should be checked by " + types.Architecture.Title() + ", " +
+ "" + strconv.Itoa(countDevelopmentFunction) + " should be checked by " + types.Development.Title() + ", " +
+ "and " + strconv.Itoa(countOperationFunction) + " should be checked by " + types.Operations.Title() + ".
")
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ // Remember the default left margin; each section indents to 15 for its
+ // category list and restores the original margin afterwards.
+ oldLeft, _, _, _ := r.pdf.GetMargins()
+
+ // --- Section: Business Side ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.BusinessSide.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksBusinessSideFunction) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksBusinessSideFunction, true)),
+ types.CriticalSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksBusinessSideFunction, true)),
+ types.HighSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksBusinessSideFunction, true)),
+ types.ElevatedSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksBusinessSideFunction, true)),
+ types.MediumSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksBusinessSideFunction, true)),
+ types.LowSeverity, true, true, false, false)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Architecture ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Architecture.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksArchitectureFunction) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksArchitectureFunction, true)),
+ types.CriticalSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksArchitectureFunction, true)),
+ types.HighSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksArchitectureFunction, true)),
+ types.ElevatedSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksArchitectureFunction, true)),
+ types.MediumSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksArchitectureFunction, true)),
+ types.LowSeverity, true, true, false, false)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Development ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Development.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksDevelopmentFunction) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksDevelopmentFunction, true)),
+ types.CriticalSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksDevelopmentFunction, true)),
+ types.HighSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksDevelopmentFunction, true)),
+ types.ElevatedSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksDevelopmentFunction, true)),
+ types.MediumSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksDevelopmentFunction, true)),
+ types.LowSeverity, true, true, false, false)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Operations ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Operations.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksOperationFunction) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksOperationFunction, true)),
+ types.CriticalSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksOperationFunction, true)),
+ types.HighSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksOperationFunction, true)),
+ types.ElevatedSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksOperationFunction, true)),
+ types.MediumSeverity, true, true, false, false)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksOperationFunction, true)),
+ types.LowSeverity, true, true, false, false)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+// createSTRIDE renders the "STRIDE Classification of Identified Risks"
+// chapter: all identified risks are clustered by STRIDE category (Spoofing,
+// Tampering, Repudiation, Information Disclosure, Denial of Service,
+// Elevation of Privilege). Each cluster lists its risk categories from
+// critical down to low severity via addCategories, indented under a category
+// heading, or "n/a" in gray when the cluster is empty.
+func (r *pdfReporter) createSTRIDE(parsedModel *types.ParsedModel) {
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "STRIDE Classification of Identified Risks"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{stride}")
+ r.currentChapterTitleBreadcrumb = title
+
+ risksSTRIDESpoofing := types.RisksOfOnlySTRIDESpoofing(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksSTRIDETampering := types.RisksOfOnlySTRIDETampering(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksSTRIDERepudiation := types.RisksOfOnlySTRIDERepudiation(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksSTRIDEInformationDisclosure := types.RisksOfOnlySTRIDEInformationDisclosure(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksSTRIDEDenialOfService := types.RisksOfOnlySTRIDEDenialOfService(parsedModel, parsedModel.GeneratedRisksByCategory)
+ risksSTRIDEElevationOfPrivilege := types.RisksOfOnlySTRIDEElevationOfPrivilege(parsedModel, parsedModel.GeneratedRisksByCategory)
+
+ countSTRIDESpoofing := types.CountRisks(risksSTRIDESpoofing)
+ countSTRIDETampering := types.CountRisks(risksSTRIDETampering)
+ countSTRIDERepudiation := types.CountRisks(risksSTRIDERepudiation)
+ countSTRIDEInformationDisclosure := types.CountRisks(risksSTRIDEInformationDisclosure)
+ countSTRIDEDenialOfService := types.CountRisks(risksSTRIDEDenialOfService)
+ countSTRIDEElevationOfPrivilege := types.CountRisks(risksSTRIDEElevationOfPrivilege)
+ var intro strings.Builder
+ intro.WriteString("This chapter clusters and classifies the risks by STRIDE categories: " +
+ "In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " +
+ "of which " + strconv.Itoa(countSTRIDESpoofing) + " in the " + types.Spoofing.Title() + " category, " +
+ "" + strconv.Itoa(countSTRIDETampering) + " in the " + types.Tampering.Title() + " category, " +
+ "" + strconv.Itoa(countSTRIDERepudiation) + " in the " + types.Repudiation.Title() + " category, " +
+ "" + strconv.Itoa(countSTRIDEInformationDisclosure) + " in the " + types.InformationDisclosure.Title() + " category, " +
+ "" + strconv.Itoa(countSTRIDEDenialOfService) + " in the " + types.DenialOfService.Title() + " category, " +
+ "and " + strconv.Itoa(countSTRIDEElevationOfPrivilege) + " in the " + types.ElevationOfPrivilege.Title() + " category.
")
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ // Remember the default left margin; each STRIDE section indents to 15 for
+ // its category list and restores the original margin afterwards.
+ oldLeft, _, _, _ := r.pdf.GetMargins()
+
+ // --- Section: Spoofing ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Spoofing.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDESpoofing) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDESpoofing, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDESpoofing, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDESpoofing, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDESpoofing, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDESpoofing, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Tampering ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Tampering.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDETampering) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDETampering, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDETampering, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDETampering, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDETampering, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDETampering, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Repudiation ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.Repudiation.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDERepudiation) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDERepudiation, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDERepudiation, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDERepudiation, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDERepudiation, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDERepudiation, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Information Disclosure ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.InformationDisclosure.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDEInformationDisclosure) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEInformationDisclosure, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEInformationDisclosure, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEInformationDisclosure, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEInformationDisclosure, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEInformationDisclosure, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Denial of Service ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.DenialOfService.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDEDenialOfService) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEDenialOfService, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEDenialOfService, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEDenialOfService, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEDenialOfService, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEDenialOfService, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ // --- Section: Elevation of Privilege ---
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetTextColor(0, 0, 0)
+ html.Write(5, ""+types.ElevationOfPrivilege.Title()+"")
+ r.pdf.SetLeftMargin(15)
+ if len(risksSTRIDEElevationOfPrivilege) == 0 {
+ r.pdf.SetTextColor(150, 150, 150)
+ html.Write(5, "
n/a")
+ } else {
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyCriticalRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)),
+ types.CriticalSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyHighRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)),
+ types.HighSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyElevatedRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)),
+ types.ElevatedSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyMediumRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)),
+ types.MediumSeverity, true, true, false, true)
+ r.addCategories(parsedModel, types.GetRiskCategories(parsedModel, types.CategoriesOfOnlyLowRisks(parsedModel, risksSTRIDEElevationOfPrivilege, true)),
+ types.LowSeverity, true, true, false, true)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+}
+
+// createSecurityRequirements renders the "Security Requirements" chapter:
+// every custom security requirement of the model is listed alphabetically by
+// title with its description, followed by a closing disclaimer that the list
+// is not exhaustive.
+func (r *pdfReporter) createSecurityRequirements(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ chapTitle := "Security Requirements"
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{security-requirements}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "This chapter lists the custom security requirements which have been defined for the modeled target.")
+ r.pdfColorBlack()
+ for _, title := range sortedKeysOfSecurityRequirements(parsedModel) {
+ description := parsedModel.SecurityRequirements[title]
+ // Start a fresh page when close to the bottom margin; otherwise just
+ // emit vertical spacing before the next entry.
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ html.Write(5, ""+uni(title)+"
")
+ html.Write(5, uni(description))
+ }
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ html.Write(5, "This list is not complete and regulatory or law relevant security requirements have to be "+
+ "taken into account as well. Also custom individual security requirements might exist for the project.")
+}
+
+// sortedKeysOfSecurityRequirements returns the titles of all custom security
+// requirements of the model in alphabetical order, so that the report output
+// is deterministic despite Go's random map iteration.
+func sortedKeysOfSecurityRequirements(parsedModel *types.ParsedModel) []string {
+ titles := make([]string, 0, len(parsedModel.SecurityRequirements))
+ for title := range parsedModel.SecurityRequirements {
+ titles = append(titles, title)
+ }
+ sort.Strings(titles)
+ return titles
+}
+
+// createAbuseCases renders the "Abuse Cases" chapter: every custom abuse case
+// of the model is listed alphabetically by title with its description,
+// followed by a closing disclaimer that the list is not exhaustive.
+// NOTE(review): unlike createSecurityRequirements, titles/descriptions are
+// written without the `uni(...)` unicode translation here — confirm whether
+// that is intentional.
+func (r *pdfReporter) createAbuseCases(parsedModel *types.ParsedModel) {
+ r.pdf.SetTextColor(0, 0, 0)
+ chapTitle := "Abuse Cases"
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{abuse-cases}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "This chapter lists the custom abuse cases which have been defined for the modeled target.")
+ r.pdfColorBlack()
+ for _, title := range sortedKeysOfAbuseCases(parsedModel) {
+ description := parsedModel.AbuseCases[title]
+ // Start a fresh page when close to the bottom margin; otherwise just
+ // emit vertical spacing before the next entry.
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ html.Write(5, ""+title+"
")
+ html.Write(5, description)
+ }
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ html.Write(5, "This list is not complete and regulatory or law relevant abuse cases have to be "+
+ "taken into account as well. Also custom individual abuse cases might exist for the project.")
+}
+
+// sortedKeysOfAbuseCases returns the titles of all custom abuse cases of the
+// model in alphabetical order, so that the report output is deterministic
+// despite Go's random map iteration.
+func sortedKeysOfAbuseCases(parsedModel *types.ParsedModel) []string {
+ titles := make([]string, 0, len(parsedModel.AbuseCases))
+ for title := range parsedModel.AbuseCases {
+ titles = append(titles, title)
+ }
+ sort.Strings(titles)
+ return titles
+}
+
+// createQuestions renders the "Questions" chapter: custom questions that
+// arose during threat modeling are listed alphabetically, answered ones with
+// their answer, unanswered ones highlighted in the model-failure color with
+// an "- answer pending -" placeholder. The chapter headline shows
+// "unanswered / total" counts and is colored as a model failure when any
+// question is still open.
+// NOTE(review): questionsUnanswered(parsedModel) is evaluated twice (three
+// counts including the headline); could be computed once into a local.
+func (r *pdfReporter) createQuestions(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ questions := "Questions"
+ count := len(parsedModel.Questions)
+ if count == 1 {
+ questions = "Question"
+ }
+ if questionsUnanswered(parsedModel) > 0 {
+ ColorModelFailure(r.pdf)
+ }
+ chapTitle := "Questions: " + strconv.Itoa(questionsUnanswered(parsedModel)) + " / " + strconv.Itoa(count) + " " + questions
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{questions}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+ r.pdfColorBlack()
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "This chapter lists custom questions that arose during the threat modeling process.")
+
+ if len(parsedModel.Questions) == 0 {
+ r.pdfColorLightGray()
+ html.Write(5, "
")
+ html.Write(5, "No custom questions arose during the threat modeling process.")
+ }
+ r.pdfColorBlack()
+ for _, question := range sortedKeysOfQuestions(parsedModel) {
+ answer := parsedModel.Questions[question]
+ // Start a fresh page when close to the bottom margin; otherwise just
+ // emit vertical spacing before the next entry.
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdfColorBlack()
+ if len(strings.TrimSpace(answer)) > 0 {
+ html.Write(5, ""+uni(question)+"
")
+ html.Write(5, ""+uni(strings.TrimSpace(answer))+"")
+ } else {
+ // Unanswered question: model-failure color for the question, gray
+ // placeholder instead of an answer.
+ ColorModelFailure(r.pdf)
+ html.Write(5, ""+uni(question)+"
")
+ r.pdfColorLightGray()
+ html.Write(5, "- answer pending -")
+ r.pdfColorBlack()
+ }
+ }
+}
+
+// sortedKeysOfQuestions returns all question texts of the model in
+// alphabetical order, so that the report output is deterministic despite
+// Go's random map iteration.
+func sortedKeysOfQuestions(parsedModel *types.ParsedModel) []string {
+ questions := make([]string, 0, len(parsedModel.Questions))
+ for question := range parsedModel.Questions {
+ questions = append(questions, question)
+ }
+ sort.Strings(questions)
+ return questions
+}
+
+// createTagListing renders the "Tag Listing" chapter: for every available tag
+// (sorted alphabetically) it lists the titles of all model elements carrying
+// that tag — technical assets, their communication links, data assets, trust
+// boundaries, and shared runtimes. Tags used by no element are omitted.
+func (r *pdfReporter) createTagListing(parsedModel *types.ParsedModel) {
+ r.pdf.SetTextColor(0, 0, 0)
+ chapTitle := "Tag Listing"
+ r.addHeadline(chapTitle, false)
+ r.defineLinkTarget("{tag-listing}")
+ r.currentChapterTitleBreadcrumb = chapTitle
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "This chapter lists what tags are used by which elements.")
+ r.pdfColorBlack()
+ // Sort a copy: sorting parsedModel.TagsAvailable in place would mutate
+ // shared model state as a hidden side effect of report generation.
+ sorted := make([]string, len(parsedModel.TagsAvailable))
+ copy(sorted, parsedModel.TagsAvailable)
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ // Collect a comma-separated list of all element titles using this tag.
+ description := "" // TODO: add some separation texts to distinguish between technical assets and data assets etc. for example?
+ for _, techAsset := range sortedTechnicalAssetsByTitle(parsedModel) {
+ if contains(techAsset.Tags, tag) {
+ if len(description) > 0 {
+ description += ", "
+ }
+ description += techAsset.Title
+ }
+ for _, commLink := range techAsset.CommunicationLinksSorted() {
+ if contains(commLink.Tags, tag) {
+ if len(description) > 0 {
+ description += ", "
+ }
+ description += commLink.Title
+ }
+ }
+ }
+ for _, dataAsset := range sortedDataAssetsByTitle(parsedModel) {
+ if contains(dataAsset.Tags, tag) {
+ if len(description) > 0 {
+ description += ", "
+ }
+ description += dataAsset.Title
+ }
+ }
+ for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) {
+ if contains(trustBoundary.Tags, tag) {
+ if len(description) > 0 {
+ description += ", "
+ }
+ description += trustBoundary.Title
+ }
+ }
+ for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) {
+ if contains(sharedRuntime.Tags, tag) {
+ if len(description) > 0 {
+ description += ", "
+ }
+ description += sharedRuntime.Title
+ }
+ }
+ // Only render tags that are actually used by at least one element.
+ if len(description) > 0 {
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdfColorBlack()
+ html.Write(5, ""+tag+"
")
+ html.Write(5, description)
+ }
+ }
+}
+
+// sortedSharedRuntimesByTitle returns all shared runtimes of the model as a
+// slice sorted by title (via types.BySharedRuntimeTitleSort), giving the
+// report a deterministic order independent of map iteration.
+func sortedSharedRuntimesByTitle(parsedModel *types.ParsedModel) []types.SharedRuntime {
+ // Pre-size to the known number of runtimes to avoid repeated growth copies.
+ result := make([]types.SharedRuntime, 0, len(parsedModel.SharedRuntimes))
+ for _, runtime := range parsedModel.SharedRuntimes {
+ result = append(result, runtime)
+ }
+ sort.Sort(types.BySharedRuntimeTitleSort(result))
+ return result
+}
+
+// sortedTechnicalAssetsByTitle returns all technical assets of the model as a
+// slice sorted by title (via types.ByTechnicalAssetTitleSort), giving the
+// report a deterministic order independent of map iteration.
+func sortedTechnicalAssetsByTitle(parsedModel *types.ParsedModel) []types.TechnicalAsset {
+ // Pre-size to the known number of assets to avoid repeated growth copies.
+ assets := make([]types.TechnicalAsset, 0, len(parsedModel.TechnicalAssets))
+ for _, asset := range parsedModel.TechnicalAssets {
+ assets = append(assets, asset)
+ }
+ sort.Sort(types.ByTechnicalAssetTitleSort(assets))
+ return assets
+}
+
+// createRiskCategories renders the "Identified Risks by Vulnerability Category"
+// chapter: an intro paragraph with risk counts per severity, then one
+// sub-chapter per risk category (description, impact, detection logic, rating,
+// false positives, mitigation, ASVS/cheat-sheet/check references) followed by
+// the individual risk findings of that category grouped under per-severity
+// headlines. Finding paragraphs are linked to the chapter of their most
+// relevant model element.
+func (r *pdfReporter) createRiskCategories(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ // category title
+ title := "Identified Risks by Vulnerability Category"
+ r.pdfColorBlack()
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{intro-risks-by-vulnerability-category}")
+ html := r.pdf.HTMLBasicNew()
+ var text strings.Builder
+ text.WriteString("In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " +
+ "of which " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel))) + " are rated as critical, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel))) + " as high, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel))) + " as elevated, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel))) + " as medium, " +
+ "and " + strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel))) + " as low. " +
+ "
These risks are distributed across " + strconv.Itoa(len(parsedModel.GeneratedRisksByCategory)) + " vulnerability categories. ")
+ text.WriteString("The following sub-chapters of this section describe each identified risk category.") // TODO more explanation text
+ html.Write(5, text.String())
+ text.Reset()
+ r.currentChapterTitleBreadcrumb = title
+ for _, category := range types.SortedRiskCategories(parsedModel) {
+ risksStr := types.SortedRisksOfCategory(parsedModel, category)
+
+ // category color: text color follows the highest severity still at risk
+ switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ // fully mitigated categories are rendered in black regardless of severity
+ if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 {
+ r.pdfColorBlack()
+ }
+
+ // category title, suffixed with "<still-at-risk> / <total> Risk(s)"
+ countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr))
+ suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk"
+ if len(risksStr) != 1 {
+ suffix += "s"
+ }
+ title := category.Title + ": " + suffix
+ r.addHeadline(uni(title), true)
+ r.pdfColorBlack()
+ r.defineLinkTarget("{" + category.Id + "}")
+ r.currentChapterTitleBreadcrumb = title
+
+ // category details
+ var text strings.Builder
+ cweLink := "n/a"
+ if category.CWE > 0 {
+ cweLink = "CWE " +
+ strconv.Itoa(category.CWE) + ""
+ }
+ text.WriteString("Description (" + category.STRIDE.Title() + "): " + cweLink + "
")
+ text.WriteString(category.Description)
+ text.WriteString("
Impact
")
+ text.WriteString(category.Impact)
+ text.WriteString("
Detection Logic
")
+ text.WriteString(category.DetectionLogic)
+ text.WriteString("
Risk Rating
")
+ text.WriteString(category.RiskAssessment)
+ html.Write(5, text.String())
+ text.Reset()
+ // false-positives paragraph in the false-positive status color
+ ColorRiskStatusFalsePositive(r.pdf)
+ text.WriteString("
False Positives
")
+ text.WriteString(category.FalsePositives)
+ html.Write(5, text.String())
+ text.Reset()
+ // mitigation paragraph in the mitigated status color
+ ColorRiskStatusMitigated(r.pdf)
+ text.WriteString("
Mitigation (" + category.Function.Title() + "): " + category.Action + "
")
+ text.WriteString(category.Mitigation)
+
+ asvsChapter := category.ASVS
+ if len(asvsChapter) == 0 {
+ text.WriteString("
ASVS Chapter: n/a")
+ } else {
+ text.WriteString("
ASVS Chapter: " + asvsChapter + "")
+ }
+
+ cheatSheetLink := category.CheatSheet
+ if len(cheatSheetLink) == 0 {
+ cheatSheetLink = "n/a"
+ } else {
+ // derive the link text from the last URL path segment, minus .html/.htm
+ lastLinkParts := strings.Split(cheatSheetLink, "/")
+ linkText := lastLinkParts[len(lastLinkParts)-1]
+ if strings.HasSuffix(linkText, ".html") || strings.HasSuffix(linkText, ".htm") {
+ var extension = filepath.Ext(linkText)
+ linkText = linkText[0 : len(linkText)-len(extension)]
+ }
+ cheatSheetLink = "" + linkText + ""
+ }
+ text.WriteString("
Cheat Sheet: " + cheatSheetLink)
+
+ text.WriteString("
Check
")
+ text.WriteString(category.Check)
+
+ html.Write(5, text.String())
+ text.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+
+ // risk details: each category's findings always start on a fresh page
+ r.pageBreak()
+ r.pdf.SetY(36)
+ text.WriteString("Risk Findings
")
+ times := strconv.Itoa(len(risksStr)) + " time"
+ if len(risksStr) > 1 {
+ times += "s"
+ }
+ text.WriteString("The risk " + category.Title + " was found " + times + " in the analyzed architecture to be " +
+ "potentially possible. Each spot should be checked individually by reviewing the implementation whether all " +
+ "controls have been applied properly in order to mitigate each risk.
")
+ html.Write(5, text.String())
+ text.Reset()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.
")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ oldLeft, _, _, _ := r.pdf.GetMargins()
+ // per-severity headlines are written lazily, once, before the first risk
+ // of that severity (risksStr is sorted so severities arrive grouped)
+ headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false
+ for _, risk := range risksStr {
+ text.WriteString("
")
+ html.Write(5, text.String())
+ text.Reset()
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ switch risk.Severity {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ if !headlineCriticalWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft)
+ text.WriteString("
Critical Risk Severity
")
+ html.Write(5, text.String())
+ text.Reset()
+ headlineCriticalWritten = true
+ }
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ if !headlineHighWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft)
+ text.WriteString("
High Risk Severity
")
+ html.Write(5, text.String())
+ text.Reset()
+ headlineHighWritten = true
+ }
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ if !headlineElevatedWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft)
+ text.WriteString("
Elevated Risk Severity
")
+ html.Write(5, text.String())
+ text.Reset()
+ headlineElevatedWritten = true
+ }
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ if !headlineMediumWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft)
+ text.WriteString("
Medium Risk Severity
")
+ html.Write(5, text.String())
+ text.Reset()
+ headlineMediumWritten = true
+ }
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ if !headlineLowWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft)
+ text.WriteString("
Low Risk Severity
")
+ html.Write(5, text.String())
+ text.Reset()
+ headlineLowWritten = true
+ }
+ default:
+ r.pdfColorBlack()
+ }
+ // risks no longer "at risk" (mitigated etc.) are rendered in black
+ if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ r.pdfColorBlack()
+ }
+ posY := r.pdf.GetY()
+ r.pdf.SetLeftMargin(oldLeft + 10)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.")
+ text.WriteString("
")
+ html.Write(5, text.String())
+ text.Reset()
+ r.pdfColorGray()
+ r.pdf.SetFont("Helvetica", "", fontSizeVerySmall)
+ r.pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ // link the finding paragraph to its most relevant model element,
+ // preferring shared runtime over trust boundary over technical asset
+ if len(risk.MostRelevantSharedRuntimeId) > 0 {
+ r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantSharedRuntimeId])
+ } else if len(risk.MostRelevantTrustBoundaryId) > 0 {
+ r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantTrustBoundaryId])
+ } else if len(risk.MostRelevantTechnicalAssetId) > 0 {
+ r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.MostRelevantTechnicalAssetId])
+ }
+ r.writeRiskTrackingStatus(parsedModel, risk)
+ r.pdf.SetLeftMargin(oldLeft)
+ html.Write(5, text.String())
+ text.Reset()
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+ }
+}
+
+// writeRiskTrackingStatus renders the tracking state of a single risk: a
+// color-coded status cell, and — for any status other than "unchecked" —
+// the tracking date, who checked it, the ticket reference, and a wrapped
+// justification text on the following line.
+func (r *pdfReporter) writeRiskTrackingStatus(parsedModel *types.ParsedModel, risk types.Risk) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ tracking := risk.GetRiskTracking(parsedModel)
+ r.pdfColorBlack()
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "") // left indent spacer
+ // text color follows the tracking status
+ switch tracking.Status {
+ case types.Unchecked:
+ ColorRiskStatusUnchecked(r.pdf)
+ case types.InDiscussion:
+ ColorRiskStatusInDiscussion(r.pdf)
+ case types.Accepted:
+ ColorRiskStatusAccepted(r.pdf)
+ case types.InProgress:
+ ColorRiskStatusInProgress(r.pdf)
+ case types.Mitigated:
+ ColorRiskStatusMitigated(r.pdf)
+ case types.FalsePositive:
+ ColorRiskStatusFalsePositive(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ // unchecked risks are emphasized in bold
+ if tracking.Status == types.Unchecked {
+ r.pdf.SetFont("Helvetica", "B", fontSizeSmall)
+ }
+ r.pdf.CellFormat(25, 4, tracking.Status.Title(), "0", 0, "B", false, 0, "")
+ if tracking.Status != types.Unchecked {
+ dateStr := tracking.Date.Format("2006-01-02")
+ // the zero time formats as 0001-01-01; render it as empty instead
+ if dateStr == "0001-01-01" {
+ dateStr = ""
+ }
+ justificationStr := tracking.Justification
+ r.pdfColorGray()
+ r.pdf.CellFormat(20, 4, dateStr, "0", 0, "B", false, 0, "")
+ r.pdf.CellFormat(35, 4, uni(tracking.CheckedBy), "0", 0, "B", false, 0, "")
+ r.pdf.CellFormat(35, 4, uni(tracking.Ticket), "0", 0, "B", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdfColorBlack()
+ r.pdf.CellFormat(10, 4, "", "0", 0, "", false, 0, "") // indent justification line
+ r.pdf.MultiCell(170, 4, uni(justificationStr), "0", "0", false)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ } else {
+ r.pdf.Ln(-1)
+ }
+ r.pdfColorBlack()
+}
+
+func (r *pdfReporter) createTechnicalAssets(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ // category title
+ title := "Identified Risks by Technical Asset"
+ r.pdfColorBlack()
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{intro-risks-by-technical-asset}")
+ html := r.pdf.HTMLBasicNew()
+ var text strings.Builder
+ text.WriteString("In total " + strconv.Itoa(types.TotalRiskCount(parsedModel)) + " potential risks have been identified during the threat modeling process " +
+ "of which " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel))) + " are rated as critical, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel))) + " as high, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel))) + " as elevated, " +
+ "" + strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel))) + " as medium, " +
+ "and " + strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel))) + " as low. " +
+ "
These risks are distributed across " + strconv.Itoa(len(parsedModel.InScopeTechnicalAssets())) + " in-scope technical assets. ")
+ text.WriteString("The following sub-chapters of this section describe each identified risk grouped by technical asset. ") // TODO more explanation text
+ text.WriteString("The RAA value of a technical asset is the calculated \"Relative Attacker Attractiveness\" value in percent.")
+ html.Write(5, text.String())
+ text.Reset()
+ r.currentChapterTitleBreadcrumb = title
+ for _, technicalAsset := range sortedTechnicalAssetsByRiskSeverityAndTitle(parsedModel) {
+ risksStr := technicalAsset.GeneratedRisks(parsedModel)
+ countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr))
+ suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk"
+ if len(risksStr) != 1 {
+ suffix += "s"
+ }
+ if technicalAsset.OutOfScope {
+ r.pdfColorOutOfScope()
+ suffix = "out-of-scope"
+ } else {
+ switch types.HighestSeverityStillAtRisk(parsedModel, risksStr) {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ if len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr)) == 0 {
+ r.pdfColorBlack()
+ }
+ }
+
+ // asset title
+ title := technicalAsset.Title + ": " + suffix
+ r.addHeadline(uni(title), true)
+ r.pdfColorBlack()
+ r.defineLinkTarget("{" + technicalAsset.Id + "}")
+ r.currentChapterTitleBreadcrumb = title
+
+ // asset description
+ html := r.pdf.HTMLBasicNew()
+ var text strings.Builder
+ text.WriteString("Description
")
+ text.WriteString(uni(technicalAsset.Description))
+ html.Write(5, text.String())
+ text.Reset()
+ r.pdf.SetTextColor(0, 0, 0)
+
+ // and more metadata of asset in tabular view
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdfColorBlack()
+ r.pdf.CellFormat(190, 6, "Identified Risks of Asset", "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ oldLeft, _, _, _ := r.pdf.GetMargins()
+ if len(risksStr) > 0 {
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(15)
+ /*
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(185, 6, strconv.Itoa(len(risksStr))+" risksStr in total were identified", "0", 0, "", false, 0, "")
+ */
+ headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false
+ r.pdf.Ln(-1)
+ for _, risk := range risksStr {
+ text.WriteString("
")
+ html.Write(5, text.String())
+ text.Reset()
+ if r.pdf.GetY() > 250 { // 250 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ switch risk.Severity {
+ case types.CriticalSeverity:
+ ColorCriticalRisk(r.pdf)
+ if !headlineCriticalWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft + 3)
+ html.Write(5, "
Critical Risk Severity
")
+ headlineCriticalWritten = true
+ }
+ case types.HighSeverity:
+ ColorHighRisk(r.pdf)
+ if !headlineHighWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft + 3)
+ html.Write(5, "
High Risk Severity
")
+ headlineHighWritten = true
+ }
+ case types.ElevatedSeverity:
+ ColorElevatedRisk(r.pdf)
+ if !headlineElevatedWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft + 3)
+ html.Write(5, "
Elevated Risk Severity
")
+ headlineElevatedWritten = true
+ }
+ case types.MediumSeverity:
+ ColorMediumRisk(r.pdf)
+ if !headlineMediumWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft + 3)
+ html.Write(5, "
Medium Risk Severity
")
+ headlineMediumWritten = true
+ }
+ case types.LowSeverity:
+ ColorLowRisk(r.pdf)
+ if !headlineLowWritten {
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(oldLeft + 3)
+ html.Write(5, "
Low Risk Severity
")
+ headlineLowWritten = true
+ }
+ default:
+ r.pdfColorBlack()
+ }
+ if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ r.pdfColorBlack()
+ }
+ posY := r.pdf.GetY()
+ r.pdf.SetLeftMargin(oldLeft + 10)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.")
+ text.WriteString("
")
+ html.Write(5, text.String())
+ text.Reset()
+
+ r.pdf.SetFont("Helvetica", "", fontSizeVerySmall)
+ r.pdfColorGray()
+ r.pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false)
+ r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[risk.CategoryId])
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.writeRiskTrackingStatus(parsedModel, risk)
+ r.pdf.SetLeftMargin(oldLeft)
+ }
+ } else {
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetLeftMargin(15)
+ text := "No risksStr were identified."
+ if technicalAsset.OutOfScope {
+ text = "Asset was defined as out-of-scope."
+ }
+ html.Write(5, text)
+ r.pdf.Ln(-1)
+ }
+ r.pdf.SetLeftMargin(oldLeft)
+
+ r.pdf.Ln(-1)
+ r.pdf.Ln(4)
+ if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Asset Information", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Id, "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Type.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Usage.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "RAA:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ textRAA := fmt.Sprintf("%.0f", technicalAsset.RAA) + " %"
+ if technicalAsset.OutOfScope {
+ r.pdfColorGray()
+ textRAA = "out-of-scope"
+ }
+ r.pdf.MultiCell(145, 6, textRAA, "0", "0", false)
+ r.pdfColorBlack()
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Size:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Size.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Technology:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Technology.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := technicalAsset.Tags
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Internet:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Internet), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Machine:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Machine.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Encryption:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, technicalAsset.Encryption.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Multi-Tenant:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.MultiTenant), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Redundant:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Redundant), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Custom-Developed:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.CustomDevelopedParts), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Client by Human:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.UsedAsClientByHuman), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Data Processed:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsProcessedText := ""
+ for _, dataAsset := range technicalAsset.DataAssetsProcessedSorted(parsedModel) {
+ if len(dataAssetsProcessedText) > 0 {
+ dataAssetsProcessedText += ", "
+ }
+ dataAssetsProcessedText += dataAsset.Title
+ }
+ if len(dataAssetsProcessedText) == 0 {
+ r.pdfColorGray()
+ dataAssetsProcessedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(dataAssetsProcessedText), "0", "0", false)
+
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Data Stored:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsStoredText := ""
+ for _, dataAsset := range technicalAsset.DataAssetsStoredSorted(parsedModel) {
+ if len(dataAssetsStoredText) > 0 {
+ dataAssetsStoredText += ", "
+ }
+ dataAssetsStoredText += dataAsset.Title
+ }
+ if len(dataAssetsStoredText) == 0 {
+ r.pdfColorGray()
+ dataAssetsStoredText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(dataAssetsStoredText), "0", "0", false)
+
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Formats Accepted:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ formatsAcceptedText := ""
+ for _, formatAccepted := range technicalAsset.DataFormatsAcceptedSorted() {
+ if len(formatsAcceptedText) > 0 {
+ formatsAcceptedText += ", "
+ }
+ formatsAcceptedText += formatAccepted.Title()
+ }
+ if len(formatsAcceptedText) == 0 {
+ r.pdfColorGray()
+ formatsAcceptedText = "none of the special data formats accepted"
+ }
+ r.pdf.MultiCell(145, 6, formatsAcceptedText, "0", "0", false)
+
+ r.pdf.Ln(-1)
+ r.pdf.Ln(4)
+ if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Asset Rating", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, uni(technicalAsset.Owner), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, technicalAsset.Confidentiality.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, technicalAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, technicalAsset.Integrity.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, technicalAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, technicalAsset.Availability.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, technicalAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, uni(technicalAsset.JustificationCiaRating), "0", "0", false)
+
+ if technicalAsset.OutOfScope {
+ r.pdf.Ln(-1)
+ r.pdf.Ln(4)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Asset Out-of-Scope Justification", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.MultiCell(190, 6, uni(technicalAsset.JustificationOutOfScope), "0", "0", false)
+ r.pdf.Ln(-1)
+ }
+ r.pdf.Ln(-1)
+
+ if len(technicalAsset.CommunicationLinks) > 0 {
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Outgoing Communication Links: "+strconv.Itoa(len(technicalAsset.CommunicationLinks)), "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Target technical asset names are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ for _, outgoingCommLink := range technicalAsset.CommunicationLinksSorted() {
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(185, 6, uni(outgoingCommLink.Title)+" (outgoing)", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.MultiCell(185, 6, uni(outgoingCommLink.Description), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Target:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(125, 6, uni(parsedModel.TechnicalAssets[outgoingCommLink.TargetId].Title), "0", "0", false)
+ r.pdf.Link(60, r.pdf.GetY()-5, 70, 5, r.tocLinkIdByAssetId[outgoingCommLink.TargetId])
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, outgoingCommLink.Protocol.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Protocol.IsEncrypted()), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, outgoingCommLink.Authentication.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, outgoingCommLink.Authorization.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Readonly), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, outgoingCommLink.Usage.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := outgoingCommLink.Tags
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.VPN), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.IpFiltered), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsSentText := ""
+ for _, dataAsset := range outgoingCommLink.DataAssetsSentSorted(parsedModel) {
+ if len(dataAssetsSentText) > 0 {
+ dataAssetsSentText += ", "
+ }
+ dataAssetsSentText += dataAsset.Title
+ }
+ if len(dataAssetsSentText) == 0 {
+ r.pdfColorGray()
+ dataAssetsSentText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsReceivedText := ""
+ for _, dataAsset := range outgoingCommLink.DataAssetsReceivedSorted(parsedModel) {
+ if len(dataAssetsReceivedText) > 0 {
+ dataAssetsReceivedText += ", "
+ }
+ dataAssetsReceivedText += dataAsset.Title
+ }
+ if len(dataAssetsReceivedText) == 0 {
+ r.pdfColorGray()
+ dataAssetsReceivedText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false)
+ r.pdf.Ln(-1)
+ }
+ }
+
+ incomingCommLinks := parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ if len(incomingCommLinks) > 0 {
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Incoming Communication Links: "+strconv.Itoa(len(incomingCommLinks)), "0", 0, "", false, 0, "")
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Source technical asset names are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.Ln(-1)
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ for _, incomingCommLink := range incomingCommLinks {
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorBlack()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(185, 6, uni(incomingCommLink.Title)+" (incoming)", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.MultiCell(185, 6, uni(incomingCommLink.Description), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdf.Ln(-1)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Source:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, uni(parsedModel.TechnicalAssets[incomingCommLink.SourceId].Title), "0", "0", false)
+ r.pdf.Link(60, r.pdf.GetY()-5, 70, 5, r.tocLinkIdByAssetId[incomingCommLink.SourceId])
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, incomingCommLink.Protocol.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Protocol.IsEncrypted()), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, incomingCommLink.Authentication.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, incomingCommLink.Authorization.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Readonly), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, incomingCommLink.Usage.String(), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := incomingCommLink.Tags
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.VPN), "0", "0", false)
+ if r.pdf.GetY() > 270 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.IpFiltered), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsSentText := ""
+ // yep, here we reverse the sent/received direction, as it's the incoming stuff
+ for _, dataAsset := range incomingCommLink.DataAssetsSentSorted(parsedModel) {
+ if len(dataAssetsSentText) > 0 {
+ dataAssetsSentText += ", "
+ }
+ dataAssetsSentText += dataAsset.Title
+ }
+ if len(dataAssetsSentText) == 0 {
+ r.pdfColorGray()
+ dataAssetsSentText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ dataAssetsReceivedText := ""
+ // yep, here we reverse the sent/received direction, as it's the incoming stuff
+ for _, dataAsset := range incomingCommLink.DataAssetsReceivedSorted(parsedModel) {
+ if len(dataAssetsReceivedText) > 0 {
+ dataAssetsReceivedText += ", "
+ }
+ dataAssetsReceivedText += dataAsset.Title
+ }
+ if len(dataAssetsReceivedText) == 0 {
+ r.pdfColorGray()
+ dataAssetsReceivedText = "none"
+ }
+ r.pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false)
+ r.pdf.Ln(-1)
+ }
+ }
+ }
+}
+
+// createDataAssets renders the "Identified Data Breach Probabilities by Data Asset" chapter:
+// one sub-chapter per data asset (sorted by breach probability and title) with its metadata,
+// CIA ratings, processing/storage/transfer relations, and the data breach risks still at risk.
+func (r *pdfReporter) createDataAssets(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ title := "Identified Data Breach Probabilities by Data Asset"
+ r.pdfColorBlack()
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{intro-risks-by-data-asset}")
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, "In total "+strconv.Itoa(types.TotalRiskCount(parsedModel))+" potential risks have been identified during the threat modeling process "+
+ "of which "+
+ ""+strconv.Itoa(len(types.FilteredByOnlyCriticalRisks(parsedModel)))+" are rated as critical, "+
+ ""+strconv.Itoa(len(types.FilteredByOnlyHighRisks(parsedModel)))+" as high, "+
+ ""+strconv.Itoa(len(types.FilteredByOnlyElevatedRisks(parsedModel)))+" as elevated, "+
+ ""+strconv.Itoa(len(types.FilteredByOnlyMediumRisks(parsedModel)))+" as medium, "+
+ "and "+strconv.Itoa(len(types.FilteredByOnlyLowRisks(parsedModel)))+" as low. "+
+ "
These risks are distributed across "+strconv.Itoa(len(parsedModel.DataAssets))+" data assets. ")
+ html.Write(5, "The following sub-chapters of this section describe the derived data breach probabilities grouped by data asset.
") // TODO more explanation text
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdfColorGray()
+ html.Write(5, "Technical asset names and risk IDs are clickable and link to the corresponding chapter.")
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.currentChapterTitleBreadcrumb = title
+ for _, dataAsset := range sortedDataAssetsByDataBreachProbabilityAndTitle(parsedModel) {
+ if r.pdf.GetY() > 280 { // 280 as only small font previously (not 250)
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ r.pdfColorBlack()
+ // headline color reflects the highest remaining data breach probability of this asset
+ switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel) {
+ case types.Probable:
+ ColorHighRisk(r.pdf)
+ case types.Possible:
+ ColorMediumRisk(r.pdf)
+ case types.Improbable:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) {
+ r.pdfColorBlack()
+ }
+ risksStr := dataAsset.IdentifiedDataBreachProbabilityRisks(parsedModel)
+ countStillAtRisk := len(types.ReduceToOnlyStillAtRisk(parsedModel, risksStr))
+ suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risksStr)) + " Risk"
+ if len(risksStr) != 1 {
+ suffix += "s"
+ }
+ title := uni(dataAsset.Title) + ": " + suffix
+ r.addHeadline(title, true)
+ r.defineLinkTarget("{data:" + dataAsset.Id + "}")
+ r.pdfColorBlack()
+ html.Write(5, uni(dataAsset.Description))
+ html.Write(5, "
")
+
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, dataAsset.Id, "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, dataAsset.Usage.String(), "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Quantity:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, dataAsset.Quantity.String(), "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := append([]string(nil), dataAsset.Tags...) // copy before sorting so the model's tag order is not mutated
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Origin:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, uni(dataAsset.Origin), "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, uni(dataAsset.Owner), "0", "0", false)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ // CIA ratings: value plus its textual position in the rating scale
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, dataAsset.Confidentiality.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, dataAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, dataAsset.Integrity.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, dataAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.CellFormat(40, 6, dataAsset.Availability.String(), "0", 0, "", false, 0, "")
+ r.pdfColorGray()
+ r.pdf.CellFormat(115, 6, dataAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.Ln(-1)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, uni(dataAsset.JustificationCiaRating), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Processed by:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ processedByText := ""
+ for _, techAsset := range dataAsset.ProcessedByTechnicalAssetsSorted(parsedModel) {
+ if len(processedByText) > 0 {
+ processedByText += ", "
+ }
+ processedByText += techAsset.Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(processedByText) == 0 {
+ r.pdfColorGray()
+ processedByText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(processedByText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Stored by:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ storedByText := ""
+ for _, techAsset := range dataAsset.StoredByTechnicalAssetsSorted(parsedModel) {
+ if len(storedByText) > 0 {
+ storedByText += ", "
+ }
+ storedByText += techAsset.Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(storedByText) == 0 {
+ r.pdfColorGray()
+ storedByText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(storedByText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Sent via:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ sentViaText := ""
+ for _, commLink := range dataAsset.SentViaCommLinksSorted(parsedModel) {
+ if len(sentViaText) > 0 {
+ sentViaText += ", "
+ }
+ sentViaText += commLink.Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(sentViaText) == 0 {
+ r.pdfColorGray()
+ sentViaText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(sentViaText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Received via:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ receivedViaText := ""
+ for _, commLink := range dataAsset.ReceivedViaCommLinksSorted(parsedModel) {
+ if len(receivedViaText) > 0 {
+ receivedViaText += ", "
+ }
+ receivedViaText += commLink.Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(receivedViaText) == 0 {
+ r.pdfColorGray()
+ receivedViaText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(receivedViaText), "0", "0", false)
+
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Data Breach:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ dataBreachProbability := dataAsset.IdentifiedDataBreachProbabilityStillAtRisk(parsedModel)
+ riskText := dataBreachProbability.String()
+ switch dataBreachProbability {
+ case types.Probable:
+ ColorHighRisk(r.pdf)
+ case types.Possible:
+ ColorMediumRisk(r.pdf)
+ case types.Improbable:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ if !dataAsset.IsDataBreachPotentialStillAtRisk(parsedModel) {
+ r.pdfColorBlack()
+ riskText = "none"
+ }
+ r.pdf.MultiCell(145, 6, riskText, "0", "0", false)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+
+ // how can is this data asset be indirectly lost (i.e. why)
+ dataBreachRisksStillAtRisk := dataAsset.IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel)
+ types.SortByDataBreachProbability(dataBreachRisksStillAtRisk, parsedModel)
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Data Breach Risks:", "0", 0, "", false, 0, "")
+ if len(dataBreachRisksStillAtRisk) == 0 {
+ r.pdfColorGray()
+ r.pdf.MultiCell(145, 6, "This data asset has no data breach potential.", "0", "0", false)
+ } else {
+ r.pdfColorBlack()
+ riskRemainingStr := "risks" // fixed: was "risksStr", which leaked the variable name into the report text
+ if countStillAtRisk == 1 {
+ riskRemainingStr = "risk"
+ }
+ r.pdf.MultiCell(145, 6, "This data asset has data breach potential because of "+
+ ""+strconv.Itoa(countStillAtRisk)+" remaining "+riskRemainingStr+":", "0", "0", false)
+ for _, dataBreachRisk := range dataBreachRisksStillAtRisk {
+ if r.pdf.GetY() > 280 { // 280 as only small font here
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ switch dataBreachRisk.DataBreachProbability {
+ case types.Probable:
+ ColorHighRisk(r.pdf)
+ case types.Possible:
+ ColorMediumRisk(r.pdf)
+ case types.Improbable:
+ ColorLowRisk(r.pdf)
+ default:
+ r.pdfColorBlack()
+ }
+ if !dataBreachRisk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ r.pdfColorBlack()
+ }
+ r.pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
+ posY := r.pdf.GetY()
+ r.pdf.SetFont("Helvetica", "", fontSizeVerySmall)
+ r.pdf.MultiCell(185, 5, dataBreachRisk.DataBreachProbability.Title()+": "+uni(dataBreachRisk.SyntheticId), "0", "0", false)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.Link(20, posY, 180, r.pdf.GetY()-posY, r.tocLinkIdByAssetId[dataBreachRisk.CategoryId])
+ }
+ r.pdfColorBlack()
+ }
+ }
+}
+
+// createTrustBoundaries renders the "Trust Boundaries" chapter: one section per trust
+// boundary (sorted by title) with its ID, type, tags, contained technical assets, and
+// nested trust boundaries. Non-network boundaries are rendered in light gray.
+func (r *pdfReporter) createTrustBoundaries(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ title := "Trust Boundaries"
+ r.pdfColorBlack()
+ r.addHeadline(title, false)
+
+ html := r.pdf.HTMLBasicNew()
+ word := "has"
+ if len(parsedModel.TrustBoundaries) > 1 {
+ word = "have"
+ }
+ html.Write(5, "In total "+strconv.Itoa(len(parsedModel.TrustBoundaries))+" trust boundaries "+word+" been "+
+ "modeled during the threat modeling process.")
+ r.currentChapterTitleBreadcrumb = title
+ for _, trustBoundary := range sortedTrustBoundariesByTitle(parsedModel) {
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ ColorTwilight(r.pdf)
+ if !trustBoundary.Type.IsNetworkBoundary() {
+ r.pdfColorLightGray()
+ }
+ html.Write(5, ""+uni(trustBoundary.Title)+"
")
+ r.defineLinkTarget("{boundary:" + trustBoundary.Id + "}")
+ html.Write(5, uni(trustBoundary.Description))
+ html.Write(5, "
")
+
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, trustBoundary.Id, "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "")
+ ColorTwilight(r.pdf)
+ if !trustBoundary.Type.IsNetworkBoundary() {
+ r.pdfColorLightGray()
+ }
+ r.pdf.MultiCell(145, 6, trustBoundary.Type.String(), "0", "0", false)
+ r.pdfColorBlack()
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := append([]string(nil), trustBoundary.Tags...) // copy before sorting so the model's tag order is not mutated
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Assets inside:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ assetsInsideText := ""
+ for _, assetKey := range trustBoundary.TechnicalAssetsInside {
+ if len(assetsInsideText) > 0 {
+ assetsInsideText += ", "
+ }
+ assetsInsideText += parsedModel.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(assetsInsideText) == 0 {
+ r.pdfColorGray()
+ assetsInsideText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Boundaries nested:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ boundariesNestedText := ""
+ for _, boundaryKey := range trustBoundary.TrustBoundariesNested {
+ if len(boundariesNestedText) > 0 {
+ boundariesNestedText += ", "
+ }
+ boundariesNestedText += parsedModel.TrustBoundaries[boundaryKey].Title
+ }
+ if len(boundariesNestedText) == 0 {
+ r.pdfColorGray()
+ boundariesNestedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(boundariesNestedText), "0", "0", false)
+ }
+}
+
+// questionsUnanswered returns how many entries in the model's Questions map have an
+// empty (or whitespace-only) answer.
+func questionsUnanswered(parsedModel *types.ParsedModel) int {
+ count := 0
+ for _, answer := range parsedModel.Questions {
+ if strings.TrimSpace(answer) == "" {
+ count++
+ }
+ }
+ return count
+}
+
+// createSharedRuntimes renders the "Shared Runtimes" chapter: one section per shared
+// runtime (sorted by title) with its ID, tags, and the technical assets running on it.
+func (r *pdfReporter) createSharedRuntimes(parsedModel *types.ParsedModel) {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ title := "Shared Runtimes"
+ r.pdfColorBlack()
+ r.addHeadline(title, false)
+
+ html := r.pdf.HTMLBasicNew()
+ word, runtime := "has", "runtime"
+ if len(parsedModel.SharedRuntimes) > 1 {
+ word, runtime = "have", "runtimes"
+ }
+ html.Write(5, "In total "+strconv.Itoa(len(parsedModel.SharedRuntimes))+" shared "+runtime+" "+word+" been "+
+ "modeled during the threat modeling process.")
+ r.currentChapterTitleBreadcrumb = title
+ for _, sharedRuntime := range sortedSharedRuntimesByTitle(parsedModel) {
+ r.pdfColorBlack()
+ if r.pdf.GetY() > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ html.Write(5, "
")
+ }
+ html.Write(5, ""+uni(sharedRuntime.Title)+"
")
+ r.defineLinkTarget("{runtime:" + sharedRuntime.Id + "}")
+ html.Write(5, uni(sharedRuntime.Description))
+ html.Write(5, "
")
+
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(145, 6, sharedRuntime.Id, "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ tagsUsedText := ""
+ sorted := append([]string(nil), sharedRuntime.Tags...) // copy before sorting so the model's tag order is not mutated
+ sort.Strings(sorted)
+ for _, tag := range sorted {
+ if len(tagsUsedText) > 0 {
+ tagsUsedText += ", "
+ }
+ tagsUsedText += tag
+ }
+ if len(tagsUsedText) == 0 {
+ r.pdfColorGray()
+ tagsUsedText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
+
+ if r.pdf.GetY() > 265 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ }
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(40, 6, "Assets running:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ assetsInsideText := ""
+ for _, assetKey := range sharedRuntime.TechnicalAssetsRunning {
+ if len(assetsInsideText) > 0 {
+ assetsInsideText += ", "
+ }
+ assetsInsideText += parsedModel.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back
+ }
+ if len(assetsInsideText) == 0 {
+ r.pdfColorGray()
+ assetsInsideText = "none"
+ }
+ r.pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false)
+ }
+}
+
+// createRiskRulesChecked renders the "Risk Rules Checked by Threagile" chapter: execution
+// metadata (version, build/run timestamps, model filename and hash) followed by every
+// custom, individual, and built-in risk rule. Rules listed in the comma-separated
+// skipRiskRules argument are prefixed with "SKIPPED - ".
+func (r *pdfReporter) createRiskRulesChecked(parsedModel *types.ParsedModel, modelFilename string, skipRiskRules string, buildTimestamp string, modelHash string, customRiskRules map[string]*model.CustomRisk) {
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Risk Rules Checked by Threagile"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{risk-rules-checked}")
+ r.currentChapterTitleBreadcrumb = title
+
+ html := r.pdf.HTMLBasicNew()
+ var strBuilder strings.Builder
+ r.pdfColorGray()
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ timestamp := time.Now()
+ strBuilder.WriteString("Threagile Version: " + docs.ThreagileVersion)
+ strBuilder.WriteString("
Threagile Build Timestamp: " + buildTimestamp)
+ strBuilder.WriteString("
Threagile Execution Timestamp: " + timestamp.Format("20060102150405"))
+ strBuilder.WriteString("
Model Filename: " + modelFilename)
+ strBuilder.WriteString("
Model Hash (SHA256): " + modelHash)
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+ r.pdfColorBlack()
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ strBuilder.WriteString("
Threagile (see https://threagile.io for more details) is an open-source toolkit for agile threat modeling, created by Christian Schneider (https://christian-schneider.net): It allows to model an architecture with its assets in an agile fashion as a YAML file " +
+ "directly inside the IDE. Upon execution of the Threagile toolkit all standard risk rules (as well as individual custom rules if present) " +
+ "are checked against the architecture model. At the time the Threagile toolkit was executed on the model input file " +
+ "the following risk rules were checked:")
+ html.Write(5, strBuilder.String())
+ strBuilder.Reset()
+
+ // TODO use the new run system to discover risk rules instead of hard-coding them here:
+ skippedRules := strings.Split(skipRiskRules, ",")
+ skipped := ""
+ r.pdf.Ln(-1)
+
+ // iterate the custom rules in sorted ID order: Go map iteration order is randomized,
+ // which would otherwise make the generated report content non-deterministic across runs
+ customRuleIDs := make([]string, 0, len(customRiskRules))
+ for id := range customRiskRules {
+ customRuleIDs = append(customRuleIDs, id)
+ }
+ sort.Strings(customRuleIDs)
+ for _, id := range customRuleIDs {
+ customRule := customRiskRules[id]
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ if contains(skippedRules, id) {
+ skipped = "SKIPPED - "
+ } else {
+ skipped = ""
+ }
+ r.pdf.CellFormat(190, 3, skipped+customRule.Category.Title, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdf.CellFormat(190, 6, id, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "I", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Custom Risk Rule", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, customRule.Category.STRIDE.Title(), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, firstParagraph(customRule.Category.Description), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, customRule.Category.DetectionLogic, "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, customRule.Category.RiskAssessment, "0", "0", false)
+ }
+
+ // individual risk categories (already deterministically ordered by the sorted-keys helper)
+ for _, key := range sortedKeysOfIndividualRiskCategories(parsedModel) {
+ individualRiskCategory := parsedModel.IndividualRiskCategories[key]
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ r.pdf.CellFormat(190, 3, individualRiskCategory.Title, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdf.CellFormat(190, 6, individualRiskCategory.Id, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "I", fontSizeBody)
+ r.pdf.CellFormat(190, 6, "Individual Risk Category", "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, individualRiskCategory.STRIDE.Title(), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, firstParagraph(individualRiskCategory.Description), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, individualRiskCategory.DetectionLogic, "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, individualRiskCategory.RiskAssessment, "0", "0", false)
+ }
+
+ for _, rule := range risks.GetBuiltInRiskRules() {
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "B", fontSizeBody)
+ if contains(skippedRules, rule.Category().Id) {
+ skipped = "SKIPPED - "
+ } else {
+ skipped = ""
+ }
+ r.pdf.CellFormat(190, 3, skipped+rule.Category().Title, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeSmall)
+ r.pdf.CellFormat(190, 6, rule.Category().Id, "0", 0, "", false, 0, "")
+ r.pdf.Ln(-1)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, rule.Category().STRIDE.Title(), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, firstParagraph(rule.Category().Description), "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, rule.Category().DetectionLogic, "0", "0", false)
+ r.pdfColorGray()
+ r.pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
+ r.pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
+ r.pdfColorBlack()
+ r.pdf.MultiCell(160, 6, rule.Category().RiskAssessment, "0", "0", false)
+ }
+}
+
+func (r *pdfReporter) createTargetDescription(parsedModel *types.ParsedModel, baseFolder string) error {
+ uni := r.pdf.UnicodeTranslatorFromDescriptor("")
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Application Overview"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{target-overview}")
+ r.currentChapterTitleBreadcrumb = title
+
+ var intro strings.Builder
+ html := r.pdf.HTMLBasicNew()
+
+ intro.WriteString("<b>Business Criticality</b><br>")
+ intro.WriteString("The overall business criticality of \"" + uni(parsedModel.Title) + "\" was rated as:<br>")
+ html.Write(5, intro.String())
+ criticality := parsedModel.BusinessCriticality
+ intro.Reset()
+ r.pdfColorGray()
+ intro.WriteString("( ")
+ if criticality == types.Archive {
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+ intro.WriteString("<b>" + strings.ToUpper(types.Archive.String()) + "</b>")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorGray()
+ } else {
+ intro.WriteString(types.Archive.String())
+ }
+ intro.WriteString(" | ")
+ if criticality == types.Operational {
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+ intro.WriteString("<b>" + strings.ToUpper(types.Operational.String()) + "</b>")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorGray()
+ } else {
+ intro.WriteString(types.Operational.String())
+ }
+ intro.WriteString(" | ")
+ if criticality == types.Important {
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+ intro.WriteString("<b>" + strings.ToUpper(types.Important.String()) + "</b>")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorGray()
+ } else {
+ intro.WriteString(types.Important.String())
+ }
+ intro.WriteString(" | ")
+ if criticality == types.Critical {
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+ intro.WriteString("<b>" + strings.ToUpper(types.Critical.String()) + "</b>")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorGray()
+ } else {
+ intro.WriteString(types.Critical.String())
+ }
+ intro.WriteString(" | ")
+ if criticality == types.MissionCritical {
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+ intro.WriteString("<b>" + strings.ToUpper(types.MissionCritical.String()) + "</b>")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorGray()
+ } else {
+ intro.WriteString(types.MissionCritical.String())
+ }
+ intro.WriteString(" )")
+ html.Write(5, intro.String())
+ intro.Reset()
+ r.pdfColorBlack()
+
+ intro.WriteString("<br><b>Business Overview</b><br>")
+ intro.WriteString(uni(parsedModel.BusinessOverview.Description))
+ html.Write(5, intro.String())
+ intro.Reset()
+ err := r.addCustomImages(parsedModel.BusinessOverview.Images, baseFolder, html)
+ if err != nil {
+ return fmt.Errorf("error adding custom images: %w", err)
+ }
+
+ intro.WriteString("<br><b>Technical Overview</b><br>")
+ intro.WriteString(uni(parsedModel.TechnicalOverview.Description))
+ html.Write(5, intro.String())
+ intro.Reset()
+ err = r.addCustomImages(parsedModel.TechnicalOverview.Images, baseFolder, html)
+ if err != nil {
+ return fmt.Errorf("error adding custom images: %w", err)
+ }
+ return nil
+}
+
+func (r *pdfReporter) addCustomImages(customImages []map[string]string, baseFolder string, html gofpdf.HTMLBasicType) error {
+ var text strings.Builder
+ for _, customImage := range customImages {
+ for imageFilename := range customImage {
+ imageFilenameWithoutPath := filepath.Base(imageFilename)
+ // check JPEG, PNG or GIF
+ extension := strings.ToLower(filepath.Ext(imageFilenameWithoutPath))
+ if extension == ".jpeg" || extension == ".jpg" || extension == ".png" || extension == ".gif" {
+ imageFullFilename := filepath.Join(baseFolder, imageFilenameWithoutPath)
+ heightWhenWidthIsFix, err := getHeightWhenWidthIsFix(imageFullFilename, 180)
+ if err != nil {
+ return fmt.Errorf("error getting height of image file: %w", err)
+ }
+ if r.pdf.GetY()+heightWhenWidthIsFix > 250 {
+ r.pageBreak()
+ r.pdf.SetY(36)
+ } else {
+ text.WriteString("<br>")
+ }
+ text.WriteString(customImage[imageFilename] + ":<br>")
+ html.Write(5, text.String())
+ text.Reset()
+
+ var options gofpdf.ImageOptions
+ options.ImageType = ""
+ r.pdf.RegisterImage(imageFullFilename, "")
+ r.pdf.ImageOptions(imageFullFilename, 15, r.pdf.GetY()+50, 170, 0, true, options, 0, "")
+ } else {
+ log.Print("Ignoring custom image file: ", imageFilenameWithoutPath)
+ }
+ }
+ }
+ return nil
+}
+
+// fileExists checks if a file exists and is not a directory before we
+// try using it to prevent further errors.
+func fileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ return false
+ }
+ return !info.IsDir()
+}
+
+func getHeightWhenWidthIsFix(imageFullFilename string, width float64) (float64, error) {
+ if !fileExists(imageFullFilename) {
+ return 0, fmt.Errorf("image file does not exist (or is not readable as file): %s", filepath.Base(imageFullFilename))
+ }
+ /* #nosec imageFullFilename is not tainted (see caller restricting it to image files of model folder only) */
+ file, err := os.Open(imageFullFilename)
+ defer func() { _ = file.Close() }()
+ if err != nil {
+ return 0, fmt.Errorf("error opening image file: %w", err)
+ }
+ img, _, err := image.DecodeConfig(file)
+ if err != nil {
+ return 0, fmt.Errorf("error decoding image file: %w", err)
+ }
+ return float64(img.Height) / (float64(img.Width) / width), nil
+}
+
+func (r *pdfReporter) embedDataFlowDiagram(diagramFilenamePNG string, tempFolder string) {
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Data-Flow Diagram"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{data-flow-diagram}")
+ r.currentChapterTitleBreadcrumb = title
+
+ var intro strings.Builder
+ intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " +
+ "overview of the data-flow between technical assets. " +
+ "The RAA value is the calculated Relative Attacker Attractiveness in percent. " +
+ "For a full high-resolution version of this diagram please refer to the PNG image file alongside this report.")
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, intro.String())
+
+ // check to rotate the image if it is wider than high
+ /* #nosec diagramFilenamePNG is not tainted */
+ imagePath, _ := os.Open(diagramFilenamePNG)
+ defer func() { _ = imagePath.Close() }()
+ srcImage, _, _ := image.Decode(imagePath)
+ srcDimensions := srcImage.Bounds()
+ // wider than high?
+ muchWiderThanHigh := srcDimensions.Dx() > int(float64(srcDimensions.Dy())*1.25)
+ // fresh page (eventually landscape)?
+ r.isLandscapePage = false
+ _ = tempFolder
+ /*
+ pinnedWidth, pinnedHeight := 190.0, 210.0
+ if dataFlowDiagramFullscreen {
+ pinnedHeight = 235.0
+ if muchWiderThanHigh {
+ if allowedPdfLandscapePages {
+ pinnedWidth = 275.0
+ isLandscapePage = true
+ r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4"))
+ } else {
+ // so rotate the image left by 90 degrees
+ // ok, use temp PNG then
+ // now rotate left by 90 degrees
+ rotatedFile, err := os.CreateTemp(tempFolder, "diagram-*-.png")
+ checkErr(err)
+ defer os.Remove(rotatedFile.Name())
+ dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx()))
+ err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0})
+ checkErr(err)
+ newImage, _ := os.Create(rotatedFile.Name())
+ defer newImage.Close()
+ err = png.Encode(newImage, dstImage)
+ checkErr(err)
+ diagramFilenamePNG = rotatedFile.Name()
+ }
+ } else {
+ r.pdf.AddPage()
+ }
+ } else {
+ r.pdf.Ln(10)
+ }*/
+ // embed in PDF
+ var options gofpdf.ImageOptions
+ options.ImageType = ""
+ r.pdf.RegisterImage(diagramFilenamePNG, "")
+ var maxWidth, maxHeight, newWidth int
+ var embedWidth, embedHeight float64
+ if allowedPdfLandscapePages && muchWiderThanHigh {
+ maxWidth, maxHeight = 275, 150
+ r.isLandscapePage = true
+ r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4"))
+ } else {
+ r.pdf.Ln(10)
+ maxWidth, maxHeight = 190, 200 // reduced height as a text paragraph is above
+ }
+ newWidth = srcDimensions.Dx() / (srcDimensions.Dy() / maxHeight)
+ if newWidth <= maxWidth {
+ embedWidth, embedHeight = 0, float64(maxHeight)
+ } else {
+ embedWidth, embedHeight = float64(maxWidth), 0
+ }
+ r.pdf.ImageOptions(diagramFilenamePNG, 10, r.pdf.GetY(), embedWidth, embedHeight, true, options, 0, "")
+ r.isLandscapePage = false
+
+ // add diagram legend page
+ if embedDiagramLegendPage {
+ r.pdf.AddPage()
+ gofpdi.UseImportedTemplate(r.pdf, r.diagramLegendTemplateId, 0, 0, 0, 300)
+ }
+}
+
+func sortedKeysOfIndividualRiskCategories(parsedModel *types.ParsedModel) []string {
+ keys := make([]string, 0)
+ for k := range parsedModel.IndividualRiskCategories {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func (r *pdfReporter) embedDataRiskMapping(diagramFilenamePNG string, tempFolder string) {
+ r.pdf.SetTextColor(0, 0, 0)
+ title := "Data Mapping"
+ r.addHeadline(title, false)
+ r.defineLinkTarget("{data-risk-mapping}")
+ r.currentChapterTitleBreadcrumb = title
+
+ var intro strings.Builder
+ intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " +
+ "distribution of data assets across technical assets. The color matches the identified data breach probability and risk level " +
+ "(see the \"Data Breach Probabilities\" chapter for more details). " +
+ "A solid line stands for <b>data is stored</b> by the asset and a dashed one means " +
+ "<b>data is processed</b> by the asset. For a full high-resolution version of this diagram please refer to the PNG image " +
+ "file alongside this report.")
+
+ html := r.pdf.HTMLBasicNew()
+ html.Write(5, intro.String())
+
+ // TODO dedupe with code from other diagram embedding (almost same code)
+ // check to rotate the image if it is wider than high
+ /* #nosec diagramFilenamePNG is not tainted */
+ imagePath, _ := os.Open(diagramFilenamePNG)
+ defer func() { _ = imagePath.Close() }()
+ srcImage, _, _ := image.Decode(imagePath)
+ srcDimensions := srcImage.Bounds()
+ // wider than high?
+ widerThanHigh := srcDimensions.Dx() > srcDimensions.Dy()
+ pinnedWidth, pinnedHeight := 190.0, 195.0
+ // fresh page (eventually landscape)?
+ r.isLandscapePage = false
+ _ = tempFolder
+ /*
+ if dataFlowDiagramFullscreen {
+ pinnedHeight = 235.0
+ if widerThanHigh {
+ if allowedPdfLandscapePages {
+ pinnedWidth = 275.0
+ isLandscapePage = true
+ r.pdf.AddPageFormat("L", r.pdf.GetPageSizeStr("A4"))
+ } else {
+ // so rotate the image left by 90 degrees
+ // ok, use temp PNG then
+ // now rotate left by 90 degrees
+ rotatedFile, err := os.CreateTemp(tempFolder, "diagram-*-.png")
+ checkErr(err)
+ defer os.Remove(rotatedFile.Name())
+ dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx()))
+ err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0})
+ checkErr(err)
+ newImage, _ := os.Create(rotatedFile.Name())
+ defer newImage.Close()
+ err = png.Encode(newImage, dstImage)
+ checkErr(err)
+ diagramFilenamePNG = rotatedFile.Name()
+ }
+ } else {
+ r.pdf.AddPage()
+ }
+ } else {
+ r.pdf.Ln(10)
+ }
+ */
+ // embed in PDF
+ r.pdf.Ln(10)
+ var options gofpdf.ImageOptions
+ options.ImageType = ""
+ r.pdf.RegisterImage(diagramFilenamePNG, "")
+ if widerThanHigh {
+ pinnedHeight = 0
+ } else {
+ pinnedWidth = 0
+ }
+ r.pdf.ImageOptions(diagramFilenamePNG, 10, r.pdf.GetY(), pinnedWidth, pinnedHeight, true, options, 0, "")
+ r.isLandscapePage = false
+}
+
+func (r *pdfReporter) writeReportToFile(reportFilename string) error {
+ err := r.pdf.OutputFileAndClose(reportFilename)
+ if err != nil {
+ return fmt.Errorf("error writing PDF report file: %w", err)
+ }
+ return nil
+}
+
+func (r *pdfReporter) addHeadline(headline string, small bool) {
+ r.pdf.AddPage()
+ gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300)
+ fontSize := fontSizeHeadline
+ if small {
+ fontSize = fontSizeHeadlineSmall
+ }
+ r.pdf.SetFont("Helvetica", "B", float64(fontSize))
+ r.pdf.Text(11, 40, headline)
+ r.pdf.SetFont("Helvetica", "", fontSizeBody)
+ r.pdf.SetX(17)
+ r.pdf.SetY(46)
+}
+
+func (r *pdfReporter) pageBreak() {
+ r.pdf.SetDrawColor(0, 0, 0)
+ r.pdf.SetDashPattern([]float64{}, 0)
+ r.pdf.AddPage()
+ gofpdi.UseImportedTemplate(r.pdf, r.contentTemplateId, 0, 0, 0, 300)
+ r.pdf.SetX(17)
+ r.pdf.SetY(20)
+}
+
+func (r *pdfReporter) pageBreakInLists() {
+ r.pageBreak()
+ r.pdf.SetLineWidth(0.25)
+ r.pdf.SetDrawColor(160, 160, 160)
+ r.pdf.SetDashPattern([]float64{0.5, 0.5}, 0)
+}
+
+func (r *pdfReporter) pdfColorDataAssets() {
+ r.pdf.SetTextColor(18, 36, 111)
+}
+func rgbHexColorDataAssets() string {
+ return "#12246F"
+}
+
+func (r *pdfReporter) pdfColorTechnicalAssets() {
+ r.pdf.SetTextColor(18, 36, 111)
+}
+func rgbHexColorTechnicalAssets() string {
+ return "#12246F"
+}
+
+func (r *pdfReporter) pdfColorTrustBoundaries() {
+ r.pdf.SetTextColor(18, 36, 111)
+}
+func rgbHexColorTrustBoundaries() string {
+ return "#12246F"
+}
+
+func (r *pdfReporter) pdfColorSharedRuntime() {
+ r.pdf.SetTextColor(18, 36, 111)
+}
+func rgbHexColorSharedRuntime() string {
+ return "#12246F"
+}
+
+func (r *pdfReporter) pdfColorRiskFindings() {
+ r.pdf.SetTextColor(160, 40, 30)
+}
+
+func rgbHexColorRiskFindings() string {
+ return "#A0281E"
+}
+
+func (r *pdfReporter) pdfColorDisclaimer() {
+ r.pdf.SetTextColor(140, 140, 140)
+}
+func rgbHexColorDisclaimer() string {
+ return "#8C8C8C"
+}
+
+func (r *pdfReporter) pdfColorOutOfScope() {
+ r.pdf.SetTextColor(127, 127, 127)
+}
+
+func rgbHexColorOutOfScope() string {
+ return "#7F7F7F"
+}
+
+func (r *pdfReporter) pdfColorGray() {
+ r.pdf.SetTextColor(80, 80, 80)
+}
+func rgbHexColorGray() string {
+ return "#505050"
+}
+
+func (r *pdfReporter) pdfColorLightGray() {
+ r.pdf.SetTextColor(100, 100, 100)
+}
+func rgbHexColorLightGray() string {
+ return "#646464"
+}
+
+func (r *pdfReporter) pdfColorBlack() {
+ r.pdf.SetTextColor(0, 0, 0)
+}
+func rgbHexColorBlack() string {
+ return "#000000"
+}
+
+func (r *pdfReporter) pdfColorRed() {
+ r.pdf.SetTextColor(255, 0, 0)
+}
+func rgbHexColorRed() string {
+ return "#FF0000"
+}
diff --git a/risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go b/pkg/security/risks/builtin/accidental-secret-leak-rule.go
similarity index 54%
rename from risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go
rename to pkg/security/risks/builtin/accidental-secret-leak-rule.go
index 3f4f9b4c..fe0afee3 100644
--- a/risks/built-in/accidental-secret-leak/accidental-secret-leak-rule.go
+++ b/pkg/security/risks/builtin/accidental-secret-leak-rule.go
@@ -1,11 +1,17 @@
-package accidental_secret_leak
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type AccidentalSecretLeakRule struct{}
+
+func NewAccidentalSecretLeakRule() *AccidentalSecretLeakRule {
+ return &AccidentalSecretLeakRule{}
+}
+
+func (*AccidentalSecretLeakRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "accidental-secret-leak",
Title: "Accidental Secret Leak",
Description: "Sourcecode repositories (including their histories) as well as artifact registries can accidentally contain secrets like " +
@@ -20,31 +26,31 @@ func Category() model.RiskCategory {
"See for example tools like \"git-secrets\" or \"Talisman\" to have check-in preventive measures for secrets. " +
"Consider also to regularly scan your repositories for secrets accidentally checked-in using scanning tools like \"gitleaks\" or \"gitrob\".",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.InformationDisclosure,
+ Function: types.Operations,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "In-scope sourcecode repositories and artifact registries.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Usually no false positives.",
ModelFailurePossibleReason: false,
CWE: 200,
}
}
-func SupportedTags() []string {
+func (*AccidentalSecretLeakRule) SupportedTags() []string {
return []string{"git", "nexus"}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- techAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *AccidentalSecretLeakRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range parsedModel.SortedTechnicalAssetIDs() {
+ techAsset := parsedModel.TechnicalAssets[id]
if !techAsset.OutOfScope &&
- (techAsset.Technology == model.SourcecodeRepository || techAsset.Technology == model.ArtifactRegistry) {
- var risk model.Risk
+ (techAsset.Technology == types.SourcecodeRepository || techAsset.Technology == types.ArtifactRegistry) {
+ var risk types.Risk
if techAsset.IsTaggedWithAny("git") {
- risk = createRisk(techAsset, "Git", "Git Leak Prevention")
+ risk = r.createRisk(parsedModel, techAsset, "Git", "Git Leak Prevention")
} else {
- risk = createRisk(techAsset, "", "")
+ risk = r.createRisk(parsedModel, techAsset, "", "")
}
risks = append(risks, risk)
}
@@ -52,7 +58,7 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, prefix, details string) model.Risk {
+func (r *AccidentalSecretLeakRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, prefix, details string) types.Risk {
if len(prefix) > 0 {
prefix = " (" + prefix + ")"
}
@@ -60,28 +66,28 @@ func createRisk(technicalAsset model.TechnicalAsset, prefix, details string) mod
if len(details) > 0 {
title += ": <u>" + details + "</u>"
}
- impact := model.LowImpact
- if technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical ||
- technicalAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
+ impact := types.LowImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(parsedModel) >= types.Critical ||
+ technicalAsset.HighestAvailability(parsedModel) >= types.Critical {
+ impact = types.MediumImpact
}
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.HighImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.HighImpact
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/code-backdooring/code-backdooring-rule.go b/pkg/security/risks/builtin/code-backdooring-rule.go
similarity index 68%
rename from risks/built-in/code-backdooring/code-backdooring-rule.go
rename to pkg/security/risks/builtin/code-backdooring-rule.go
index 1f6e518e..1e19966f 100644
--- a/risks/built-in/code-backdooring/code-backdooring-rule.go
+++ b/pkg/security/risks/builtin/code-backdooring-rule.go
@@ -1,11 +1,17 @@
-package code_backdooring
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type CodeBackdooringRule struct{}
+
+func NewCodeBackdooringRule() *CodeBackdooringRule {
+ return &CodeBackdooringRule{}
+}
+
+func (*CodeBackdooringRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "code-backdooring",
Title: "Code Backdooring",
Description: "For each build-pipeline component Code Backdooring risks might arise where attackers compromise the build-pipeline " +
@@ -20,8 +26,8 @@ func Category() model.RiskCategory {
"components on the public internet and also not exposing it in front of unmanaged (out-of-scope) developer clients." +
"Also consider the use of code signing to prevent code modifications.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.Tampering,
+ Function: types.Operations,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope development relevant technical assets which are either accessed by out-of-scope unmanaged " +
"developer clients and/or are directly accessed by any kind of internet-located (non-VPN) component or are themselves directly located " +
"on the internet.",
@@ -36,27 +42,27 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*CodeBackdooringRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *CodeBackdooringRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range parsedModel.SortedTechnicalAssetIDs() {
+ technicalAsset := parsedModel.TechnicalAssets[id]
if !technicalAsset.OutOfScope && technicalAsset.Technology.IsDevelopmentRelevant() {
if technicalAsset.Internet {
- risks = append(risks, createRisk(technicalAsset, true))
+ risks = append(risks, r.createRisk(parsedModel, technicalAsset, true))
continue
}
// TODO: ensure that even internet or unmanaged clients coming over a reverse-proxy or load-balancer like component are treated as if it was directly accessed/exposed on the internet or towards unmanaged dev clients
//riskByLinkAdded := false
- for _, callerLink := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
- caller := model.ParsedModelRoot.TechnicalAssets[callerLink.SourceId]
+ for _, callerLink := range parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
+ caller := parsedModel.TechnicalAssets[callerLink.SourceId]
if (!callerLink.VPN && caller.Internet) || caller.OutOfScope {
- risks = append(risks, createRisk(technicalAsset, true))
+ risks = append(risks, r.createRisk(parsedModel, technicalAsset, true))
//riskByLinkAdded = true
break
}
@@ -66,17 +72,17 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Risk {
+func (r *CodeBackdooringRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, elevatedRisk bool) types.Risk {
title := "<b>Code Backdooring</b> risk at <b>" + technicalAsset.Title + "</b>"
- impact := model.LowImpact
- if technicalAsset.Technology != model.CodeInspectionPlatform {
+ impact := types.LowImpact
+ if technicalAsset.Technology != types.CodeInspectionPlatform {
if elevatedRisk {
- impact = model.MediumImpact
+ impact = types.MediumImpact
}
- if technicalAsset.HighestConfidentiality() >= model.Confidential || technicalAsset.HighestIntegrity() >= model.Critical {
- impact = model.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) >= types.Confidential || technicalAsset.HighestIntegrity(input) >= types.Critical {
+ impact = types.MediumImpact
if elevatedRisk {
- impact = model.HighImpact
+ impact = types.HighImpact
}
}
}
@@ -84,10 +90,10 @@ func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Ri
uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{})
uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true
for _, codeDeploymentTargetCommLink := range technicalAsset.CommunicationLinks {
- if codeDeploymentTargetCommLink.Usage == model.DevOps {
+ if codeDeploymentTargetCommLink.Usage == types.DevOps {
for _, dataAssetID := range codeDeploymentTargetCommLink.DataAssetsSent {
// it appears to be code when elevated integrity rating of sent data asset
- if model.ParsedModelRoot.DataAssets[dataAssetID].Integrity >= model.Important {
+ if input.DataAssets[dataAssetID].Integrity >= types.Important {
// here we've got a deployment target which has its data assets at risk via deployment of backdoored code
uniqueDataBreachTechnicalAssetIDs[codeDeploymentTargetCommLink.TargetId] = true
break
@@ -96,20 +102,20 @@ func createRisk(technicalAsset model.TechnicalAsset, elevatedRisk bool) model.Ri
}
}
dataBreachTechnicalAssetIDs := make([]string, 0)
- for key, _ := range uniqueDataBreachTechnicalAssetIDs {
+ for key := range uniqueDataBreachTechnicalAssetIDs {
dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key)
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs,
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go b/pkg/security/risks/builtin/container-baseimage-backdooring-rule.go
similarity index 59%
rename from risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go
rename to pkg/security/risks/builtin/container-baseimage-backdooring-rule.go
index e8d92d37..f77d412d 100644
--- a/risks/built-in/container-baseimage-backdooring/container-baseimage-backdooring-rule.go
+++ b/pkg/security/risks/builtin/container-baseimage-backdooring-rule.go
@@ -1,11 +1,17 @@
-package container_baseimage_backdooring
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type ContainerBaseImageBackdooringRule struct{}
+
+func NewContainerBaseImageBackdooringRule() *ContainerBaseImageBackdooringRule {
+ return &ContainerBaseImageBackdooringRule{}
+}
+
+func (*ContainerBaseImageBackdooringRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "container-baseimage-backdooring",
Title: "Container Base Image Backdooring",
Description: "When a technical asset is built using container technologies, Base Image Backdooring risks might arise where " +
@@ -20,8 +26,8 @@ func Category() model.RiskCategory {
"Also consider using Google's Distroless base images or otherwise very small base images. " +
"Regularly execute container image scans with tools checking the layers for vulnerable components.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS/CSVS applied?",
- Function: model.Operations,
- STRIDE: model.Tampering,
+ Function: types.Operations,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope technical assets running as containers.",
RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets.",
FalsePositives: "Fully trusted (i.e. reviewed and cryptographically signed or similar) base images of containers can be considered " +
@@ -31,39 +37,39 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*ContainerBaseImageBackdooringRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && technicalAsset.Machine == model.Container {
- risks = append(risks, createRisk(technicalAsset))
+func (r *ContainerBaseImageBackdooringRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range parsedModel.SortedTechnicalAssetIDs() {
+ technicalAsset := parsedModel.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && technicalAsset.Machine == types.Container {
+ risks = append(risks, r.createRisk(parsedModel, technicalAsset))
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *ContainerBaseImageBackdooringRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "<b>Container Base Image Backdooring</b> risk at <b>" + technicalAsset.Title + "</b>"
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.HighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/container-platform-escape/container-platform-escape-rule.go b/pkg/security/risks/builtin/container-platform-escape-rule.go
similarity index 65%
rename from risks/built-in/container-platform-escape/container-platform-escape-rule.go
rename to pkg/security/risks/builtin/container-platform-escape-rule.go
index 520b3d1d..3d32c99f 100644
--- a/risks/built-in/container-platform-escape/container-platform-escape-rule.go
+++ b/pkg/security/risks/builtin/container-platform-escape-rule.go
@@ -1,11 +1,17 @@
-package container_platform_escape
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type ContainerPlatformEscapeRule struct{}
+
+func NewContainerPlatformEscapeRule() *ContainerPlatformEscapeRule {
+ return &ContainerPlatformEscapeRule{}
+}
+
+func (*ContainerPlatformEscapeRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "container-platform-escape",
Title: "Container Platform Escape",
Description: "Container platforms are especially interesting targets for attackers as they host big parts of a containerized runtime infrastructure. " +
@@ -25,10 +31,10 @@ func Category() model.RiskCategory {
"Use only trusted base images, verify digital signatures and apply image creation best practices. Also consider using Google's Distroless base images or otherwise very small base images. " +
"Apply namespace isolation and nod affinity to separate pods from each other in terms of access and nodes the same style as you separate data.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS or CSVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Operations,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "In-scope container platforms.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Container platforms not running parts of the target architecture can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -36,47 +42,47 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*ContainerPlatformEscapeRule) SupportedTags() []string {
return []string{"docker", "kubernetes", "openshift"}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && technicalAsset.Technology == model.ContainerPlatform {
- risks = append(risks, createRisk(technicalAsset))
+func (r *ContainerPlatformEscapeRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range parsedModel.SortedTechnicalAssetIDs() {
+ technicalAsset := parsedModel.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && technicalAsset.Technology == types.ContainerPlatform {
+ risks = append(risks, r.createRisk(parsedModel, technicalAsset))
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *ContainerPlatformEscapeRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "Container Platform Escape risk at " + technicalAsset.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.HighImpact
}
// data breach at all container assets
dataBreachTechnicalAssetIDs := make([]string, 0)
- for id, techAsset := range model.ParsedModelRoot.TechnicalAssets {
- if techAsset.Machine == model.Container {
+ for id, techAsset := range parsedModel.TechnicalAssets {
+ if techAsset.Machine == types.Container {
dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, id)
}
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs,
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go b/pkg/security/risks/builtin/cross-site-request-forgery-rule.go
similarity index 61%
rename from risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go
rename to pkg/security/risks/builtin/cross-site-request-forgery-rule.go
index 04ed7882..1f19d324 100644
--- a/risks/built-in/cross-site-request-forgery/cross-site-request-forgery-rule.go
+++ b/pkg/security/risks/builtin/cross-site-request-forgery-rule.go
@@ -1,11 +1,17 @@
-package cross_site_request_forgery
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type CrossSiteRequestForgeryRule struct{}
+
+func NewCrossSiteRequestForgeryRule() *CrossSiteRequestForgeryRule {
+ return &CrossSiteRequestForgeryRule{}
+}
+
+func (*CrossSiteRequestForgeryRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "cross-site-request-forgery",
Title: "Cross-Site Request Forgery (CSRF)",
Description: "When a web application is accessed via web protocols Cross-Site Request Forgery (CSRF) risks might arise.",
@@ -19,8 +25,8 @@ func Category() model.RiskCategory {
"the same-site flag. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Spoofing,
+ Function: types.Development,
+ STRIDE: types.Spoofing,
DetectionLogic: "In-scope web applications accessed via typical web access protocols.",
RiskAssessment: "The risk rating depends on the integrity rating of the data sent across the communication link.",
FalsePositives: "Web applications passing the authentication sate via custom headers instead of cookies can " +
@@ -32,49 +38,49 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*CrossSiteRequestForgeryRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *CrossSiteRequestForgeryRule) GenerateRisks(parsedModel *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range parsedModel.SortedTechnicalAssetIDs() {
+ technicalAsset := parsedModel.TechnicalAssets[id]
if technicalAsset.OutOfScope || !technicalAsset.Technology.IsWebApplication() {
continue
}
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ incomingFlows := parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
if incomingFlow.Protocol.IsPotentialWebAccessProtocol() {
- likelihood := model.VeryLikely
- if incomingFlow.Usage == model.DevOps {
- likelihood = model.Likely
+ likelihood := types.VeryLikely
+ if incomingFlow.Usage == types.DevOps {
+ likelihood = types.Likely
}
- risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood))
+ risks = append(risks, r.createRisk(parsedModel, technicalAsset, incomingFlow, likelihood))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
- sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
+func (r *CrossSiteRequestForgeryRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk {
+ sourceAsset := parsedModel.TechnicalAssets[incomingFlow.SourceId]
title := "Cross-Site Request Forgery (CSRF) risk at " + technicalAsset.Title + " via " + incomingFlow.Title + " from " + sourceAsset.Title + ""
- impact := model.LowImpact
- if incomingFlow.HighestIntegrity() == model.MissionCritical {
- impact = model.MediumImpact
+ impact := types.LowImpact
+ if incomingFlow.HighestIntegrity(parsedModel) == types.MissionCritical {
+ impact = types.MediumImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
}
diff --git a/risks/built-in/cross-site-scripting/cross-site-scripting-rule.go b/pkg/security/risks/builtin/cross-site-scripting-rule.go
similarity index 60%
rename from risks/built-in/cross-site-scripting/cross-site-scripting-rule.go
rename to pkg/security/risks/builtin/cross-site-scripting-rule.go
index a6da6781..829de3af 100644
--- a/risks/built-in/cross-site-scripting/cross-site-scripting-rule.go
+++ b/pkg/security/risks/builtin/cross-site-scripting-rule.go
@@ -1,11 +1,17 @@
-package cross_site_scripting
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type CrossSiteScriptingRule struct{}
+
+func NewCrossSiteScriptingRule() *CrossSiteScriptingRule {
+ return &CrossSiteScriptingRule{}
+}
+
+func (*CrossSiteScriptingRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "cross-site-scripting",
Title: "Cross-Site Scripting (XSS)",
Description: "For each web application Cross-Site Scripting (XSS) risks might arise. In terms " +
@@ -18,10 +24,10 @@ func Category() model.RiskCategory {
"to avoid DOM-based XSS. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Tampering,
+ Function: types.Development,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope web applications.",
- RiskAssessment: "The risk rating depends on the sensitivity of the data processed or stored in the web application.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the data processed in the web application.",
FalsePositives: "When the technical asset " +
"is not accessed via a browser-like component (i.e not by a human user initiating the request that " +
"gets passed through all components until it reaches the web application) this can be considered a false positive.",
@@ -30,38 +36,38 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*CrossSiteScriptingRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *CrossSiteScriptingRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.OutOfScope || !technicalAsset.Technology.IsWebApplication() { // TODO: also mobile clients or rich-clients as long as they use web-view...
continue
}
- risks = append(risks, createRisk(technicalAsset))
+ risks = append(risks, r.createRisk(input, technicalAsset))
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *CrossSiteScriptingRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "Cross-Site Scripting (XSS) risk at " + technicalAsset.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical {
+ impact = types.HighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Likely, impact),
- ExploitationLikelihood: model.Likely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Likely, impact),
+ ExploitationLikelihood: types.Likely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Possible,
+ DataBreachProbability: types.Possible,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go b/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go
new file mode 100644
index 00000000..09fc70b3
--- /dev/null
+++ b/pkg/security/risks/builtin/dos-risky-access-across-trust-boundary-rule.go
@@ -0,0 +1,104 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type DosRiskyAccessAcrossTrustBoundaryRule struct{}
+
+func NewDosRiskyAccessAcrossTrustBoundaryRule() *DosRiskyAccessAcrossTrustBoundaryRule {
+ return &DosRiskyAccessAcrossTrustBoundaryRule{}
+}
+
+func (*DosRiskyAccessAcrossTrustBoundaryRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "dos-risky-access-across-trust-boundary",
+ Title: "DoS-risky Access Across Trust-Boundary",
+ Description: "Assets accessed across trust boundaries with critical or mission-critical availability rating " +
+ "are more prone to Denial-of-Service (DoS) risks.",
+ Impact: "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.",
+ ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html",
+ Action: "Anti-DoS Measures",
+ Mitigation: "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. " +
+ "Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. " +
+ "Generally applying redundancy on the targeted technical asset reduces the risk of DoS.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Operations,
+ STRIDE: types.DenialOfService,
+ DetectionLogic: "In-scope technical assets (excluding " + types.LoadBalancer.String() + ") with " +
+ "availability rating of " + types.Critical.String() + " or higher which have incoming data-flows across a " +
+ "network trust-boundary (excluding " + types.DevOps.String() + " usage).",
+ RiskAssessment: "Matching technical assets with availability rating " +
+ "of " + types.Critical.String() + " or higher are " +
+ "at " + types.LowSeverity.String() + " risk. When the availability rating is " +
+ types.MissionCritical.String() + " and neither a VPN nor IP filter for the incoming data-flow nor redundancy " +
+ "for the asset is applied, the risk-rating is considered " + types.MediumSeverity.String() + ".", // TODO reduce also, when data-flow authenticated and encrypted?
+ FalsePositives: "When the accessed target operations are not time- or resource-consuming.",
+ ModelFailurePossibleReason: false,
+ CWE: 400,
+ }
+}
+
+func (*DosRiskyAccessAcrossTrustBoundaryRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *DosRiskyAccessAcrossTrustBoundaryRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && technicalAsset.Technology != types.LoadBalancer &&
+ technicalAsset.Availability >= types.Critical {
+ for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
+ sourceAsset := input.TechnicalAssets[incomingAccess.SourceId]
+ if sourceAsset.Technology.IsTrafficForwarding() {
+ // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human
+ callersCommLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[sourceAsset.Id]
+ for _, callersCommLink := range callersCommLinks {
+ risks = r.checkRisk(input, technicalAsset, callersCommLink, sourceAsset.Title, risks)
+ }
+ } else {
+ risks = r.checkRisk(input, technicalAsset, incomingAccess, "", risks)
+ }
+ }
+ }
+ }
+ return risks
+}
+
+func (r *DosRiskyAccessAcrossTrustBoundaryRule) checkRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink, hopBetween string, risks []types.Risk) []types.Risk {
+ if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) &&
+ !incomingAccess.Protocol.IsProcessLocal() && incomingAccess.Usage != types.DevOps {
+ highRisk := technicalAsset.Availability == types.MissionCritical &&
+ !incomingAccess.VPN && !incomingAccess.IpFiltered && !technicalAsset.Redundant
+ risks = append(risks, r.createRisk(technicalAsset, incomingAccess, hopBetween,
+ input.TechnicalAssets[incomingAccess.SourceId], highRisk))
+ }
+ return risks
+}
+
+func (r *DosRiskyAccessAcrossTrustBoundaryRule) createRisk(techAsset types.TechnicalAsset, dataFlow types.CommunicationLink, hopBetween string,
+ clientOutsideTrustBoundary types.TechnicalAsset, moreRisky bool) types.Risk {
+ impact := types.LowImpact
+ if moreRisky {
+ impact = types.MediumImpact
+ }
+ if len(hopBetween) > 0 {
+ hopBetween = " forwarded via " + hopBetween + ""
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: "Denial-of-Service risky access of " + techAsset.Title + " by " + clientOutsideTrustBoundary.Title +
+ " via " + dataFlow.Title + "" + hopBetween,
+ MostRelevantTechnicalAssetId: techAsset.Id,
+ MostRelevantCommunicationLinkId: dataFlow.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataFlow.Id
+ return risk
+}
diff --git a/risks/built-in/incomplete-model/incomplete-model-rule.go b/pkg/security/risks/builtin/incomplete-model-rule.go
similarity index 50%
rename from risks/built-in/incomplete-model/incomplete-model-rule.go
rename to pkg/security/risks/builtin/incomplete-model-rule.go
index 55399410..b954d646 100644
--- a/risks/built-in/incomplete-model/incomplete-model-rule.go
+++ b/pkg/security/risks/builtin/incomplete-model-rule.go
@@ -1,11 +1,17 @@
-package incomplete_model
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type IncompleteModelRule struct{}
+
+func NewIncompleteModelRule() *IncompleteModelRule {
+ return &IncompleteModelRule{}
+}
+
+func (*IncompleteModelRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "incomplete-model",
Title: "Incomplete Model",
Description: "When the threat model contains unknown technologies or transfers data over unknown protocols, this is " +
@@ -16,31 +22,31 @@ func Category() model.RiskCategory {
Action: "Threat Modeling Completeness",
Mitigation: "Try to find out what technology or protocol is used instead of specifying that it is unknown.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.InformationDisclosure,
+ Function: types.Architecture,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "All technical assets and communication links with technology type or protocol type specified as unknown.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*IncompleteModelRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *IncompleteModelRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if !technicalAsset.OutOfScope {
- if technicalAsset.Technology == model.UnknownTechnology {
- risks = append(risks, createRiskTechAsset(technicalAsset))
+ if technicalAsset.Technology == types.UnknownTechnology {
+ risks = append(risks, r.createRiskTechAsset(technicalAsset))
}
for _, commLink := range technicalAsset.CommunicationLinks {
- if commLink.Protocol == model.UnknownProtocol {
- risks = append(risks, createRiskCommLink(technicalAsset, commLink))
+ if commLink.Protocol == types.UnknownProtocol {
+ risks = append(risks, r.createRiskCommLink(technicalAsset, commLink))
}
}
}
@@ -48,35 +54,35 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRiskTechAsset(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *IncompleteModelRule) createRiskTechAsset(technicalAsset types.TechnicalAsset) types.Risk {
title := "Unknown Technology specified at technical asset " + technicalAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
-func createRiskCommLink(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink) model.Risk {
+func (r *IncompleteModelRule) createRiskCommLink(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink) types.Risk {
title := "Unknown Protocol specified for communication link " + commLink.Title + " at technical asset " + technicalAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: commLink.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + commLink.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + commLink.Id + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/ldap-injection/ldap-injection-rule.go b/pkg/security/risks/builtin/ldap-injection-rule.go
similarity index 54%
rename from risks/built-in/ldap-injection/ldap-injection-rule.go
rename to pkg/security/risks/builtin/ldap-injection-rule.go
index e76d2706..5f04a719 100644
--- a/risks/built-in/ldap-injection/ldap-injection-rule.go
+++ b/pkg/security/risks/builtin/ldap-injection-rule.go
@@ -1,15 +1,21 @@
-package ldap_injection
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type LdapInjectionRule struct{}
+
+func NewLdapInjectionRule() *LdapInjectionRule {
+ return &LdapInjectionRule{}
+}
+
+func (*LdapInjectionRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "ldap-injection",
Title: "LDAP-Injection",
Description: "When an LDAP server is accessed LDAP-Injection risks might arise. " +
- "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.",
+ "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed.",
Impact: "If this risk remains unmitigated, attackers might be able to modify LDAP queries and access more data from the LDAP server than allowed.",
ASVS: "V5 - Validation, Sanitization and Encoding Verification Requirements",
CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html",
@@ -18,10 +24,10 @@ func Category() model.RiskCategory {
"the LDAP sever in order to stay safe from LDAP-Injection vulnerabilities. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Tampering,
+ Function: types.Development,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope clients accessing LDAP servers via typical LDAP access protocols.",
- RiskAssessment: "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed.",
FalsePositives: "LDAP server queries by search values not consisting of parts controllable by the caller can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -29,49 +35,49 @@ func Category() model.RiskCategory {
}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+func (*LdapInjectionRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *LdapInjectionRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
+ incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
- if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
+ if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
continue
}
- if incomingFlow.Protocol == model.LDAP || incomingFlow.Protocol == model.LDAPS {
- likelihood := model.Likely
- if incomingFlow.Usage == model.DevOps {
- likelihood = model.Unlikely
+ if incomingFlow.Protocol == types.LDAP || incomingFlow.Protocol == types.LDAPS {
+ likelihood := types.Likely
+ if incomingFlow.Usage == types.DevOps {
+ likelihood = types.Unlikely
}
- risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood))
+ risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood))
}
}
}
return risks
}
-func SupportedTags() []string {
- return []string{}
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
- caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
+func (r *LdapInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk {
+ caller := input.TechnicalAssets[incomingFlow.SourceId]
title := "LDAP-Injection risk at " + caller.Title + " against LDAP server " + technicalAsset.Title + "" +
" via " + incomingFlow.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical {
+ impact = types.HighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
}
diff --git a/pkg/security/risks/builtin/missing-authentication-rule.go b/pkg/security/risks/builtin/missing-authentication-rule.go
new file mode 100644
index 00000000..faf334c5
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-authentication-rule.go
@@ -0,0 +1,104 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingAuthenticationRule struct{}
+
+func NewMissingAuthenticationRule() *MissingAuthenticationRule {
+ return &MissingAuthenticationRule{}
+}
+
+func (*MissingAuthenticationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-authentication",
+ Title: "Missing Authentication",
+ Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes sensitive data. ",
+ Impact: "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.",
+ ASVS: "V2 - Authentication Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
+ Action: "Authentication of Incoming Requests",
+ Mitigation: "Apply an authentication method to the technical asset. To protect highly sensitive data consider " +
+ "the use of two-factor authentication for human users.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope technical assets (except " + types.LoadBalancer.String() + ", " + types.ReverseProxy.String() + ", " + types.ServiceRegistry.String() + ", " + types.WAF.String() + ", " + types.IDS.String() + ", and " + types.IPS.String() + " and in-process calls) should authenticate incoming requests when the asset processes " +
+ "sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).",
+ RiskAssessment: "The risk rating (medium or high) " +
+ "depends on the sensitivity of the data sent across the communication link. Monitoring callers are exempted from this risk.",
+ FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
+ "can be considered as false positives after individual review.",
+ ModelFailurePossibleReason: false,
+ CWE: 306,
+ }
+}
+
+func (*MissingAuthenticationRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingAuthenticationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.OutOfScope || technicalAsset.Technology == types.LoadBalancer ||
+ technicalAsset.Technology == types.ReverseProxy || technicalAsset.Technology == types.ServiceRegistry || technicalAsset.Technology == types.WAF || technicalAsset.Technology == types.IDS || technicalAsset.Technology == types.IPS {
+ continue
+ }
+ if technicalAsset.HighestConfidentiality(input) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(input) >= types.Critical ||
+ technicalAsset.HighestAvailability(input) >= types.Critical ||
+ technicalAsset.MultiTenant {
+ // check each incoming data flow
+ commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ for _, commLink := range commLinks {
+ caller := input.TechnicalAssets[commLink.SourceId]
+ if caller.Technology.IsUnprotectedCommunicationsTolerated() || caller.Type == types.Datastore {
+ continue
+ }
+ highRisk := commLink.HighestConfidentiality(input) == types.StrictlyConfidential ||
+ commLink.HighestIntegrity(input) == types.MissionCritical
+ lowRisk := commLink.HighestConfidentiality(input) <= types.Internal &&
+ commLink.HighestIntegrity(input) == types.Operational
+ impact := types.MediumImpact
+ if highRisk {
+ impact = types.HighImpact
+ } else if lowRisk {
+ impact = types.LowImpact
+ }
+ if commLink.Authentication == types.NoneAuthentication && !commLink.Protocol.IsProcessLocal() {
+ risks = append(risks, r.createRisk(input, technicalAsset, commLink, commLink, "", impact, types.Likely, false, r.Category()))
+ }
+ }
+ }
+ }
+ return risks
+}
+
+func (r *MissingAuthenticationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess, incomingAccessOrigin types.CommunicationLink, hopBetween string,
+ impact types.RiskExploitationImpact, likelihood types.RiskExploitationLikelihood, twoFactor bool, category types.RiskCategory) types.Risk {
+ factorString := ""
+ if twoFactor {
+ factorString = "Two-Factor "
+ }
+ if len(hopBetween) > 0 {
+ hopBetween = "forwarded via " + hopBetween + " "
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
+ ExploitationLikelihood: likelihood,
+ ExploitationImpact: impact,
+ Title: "Missing " + factorString + "Authentication covering communication link " + incomingAccess.Title + " " +
+ "from " + input.TechnicalAssets[incomingAccessOrigin.SourceId].Title + " " + hopBetween +
+ "to " + technicalAsset.Title + "",
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ MostRelevantCommunicationLinkId: incomingAccess.Id,
+ DataBreachProbability: types.Possible,
+ DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + incomingAccess.Id + "@" + input.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go b/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go
new file mode 100644
index 00000000..9c159187
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-authentication-second-factor-rule.go
@@ -0,0 +1,91 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingAuthenticationSecondFactorRule struct {
+ missingAuthenticationRule *MissingAuthenticationRule
+}
+
+func NewMissingAuthenticationSecondFactorRule(missingAuthenticationRule *MissingAuthenticationRule) *MissingAuthenticationSecondFactorRule {
+ return &MissingAuthenticationSecondFactorRule{missingAuthenticationRule: missingAuthenticationRule}
+}
+
+func (*MissingAuthenticationSecondFactorRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-authentication-second-factor",
+ Title: "Missing Two-Factor Authentication (2FA)",
+ Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests with " +
+ "two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.",
+ Impact: "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.",
+ ASVS: "V2 - Authentication Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html",
+ Action: "Authentication with Second Factor (2FA)",
+ Mitigation: "Apply an authentication method to the technical asset protecting highly sensitive data via " +
+ "two-factor authentication for human users.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.BusinessSide,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope technical assets (except " + types.LoadBalancer.String() + ", " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", " + types.IDS.String() + ", and " + types.IPS.String() + ") should authenticate incoming requests via two-factor authentication (2FA) " +
+ "when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.",
+ RiskAssessment: types.MediumSeverity.String(),
+ FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
+ "can be considered as false positives after individual review.",
+ ModelFailurePossibleReason: false,
+ CWE: 308,
+ }
+}
+
+func (*MissingAuthenticationSecondFactorRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingAuthenticationSecondFactorRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.OutOfScope ||
+ technicalAsset.Technology.IsTrafficForwarding() ||
+ technicalAsset.Technology.IsUnprotectedCommunicationsTolerated() {
+ continue
+ }
+ if technicalAsset.HighestConfidentiality(input) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(input) >= types.Critical ||
+ technicalAsset.HighestAvailability(input) >= types.Critical ||
+ technicalAsset.MultiTenant {
+ // check each incoming data flow
+ commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ for _, commLink := range commLinks {
+ caller := input.TechnicalAssets[commLink.SourceId]
+ if caller.Technology.IsUnprotectedCommunicationsTolerated() || caller.Type == types.Datastore {
+ continue
+ }
+ if caller.UsedAsClientByHuman {
+ moreRisky := commLink.HighestConfidentiality(input) >= types.Confidential ||
+ commLink.HighestIntegrity(input) >= types.Critical
+ if moreRisky && commLink.Authentication != types.TwoFactor {
+ risks = append(risks, r.missingAuthenticationRule.createRisk(input, technicalAsset, commLink, commLink, "", types.MediumImpact, types.Unlikely, true, r.Category()))
+ }
+ } else if caller.Technology.IsTrafficForwarding() {
+ // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human
+ callersCommLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[caller.Id]
+ for _, callersCommLink := range callersCommLinks {
+ callersCaller := input.TechnicalAssets[callersCommLink.SourceId]
+ if callersCaller.Technology.IsUnprotectedCommunicationsTolerated() || callersCaller.Type == types.Datastore {
+ continue
+ }
+ if callersCaller.UsedAsClientByHuman {
+ moreRisky := callersCommLink.HighestConfidentiality(input) >= types.Confidential ||
+ callersCommLink.HighestIntegrity(input) >= types.Critical
+ if moreRisky && callersCommLink.Authentication != types.TwoFactor {
+ risks = append(risks, r.missingAuthenticationRule.createRisk(input, technicalAsset, commLink, callersCommLink, caller.Title, types.MediumImpact, types.Unlikely, true, r.Category()))
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return risks
+}
diff --git a/risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go b/pkg/security/risks/builtin/missing-build-infrastructure-rule.go
similarity index 57%
rename from risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go
rename to pkg/security/risks/builtin/missing-build-infrastructure-rule.go
index 1eb1662a..22dea469 100644
--- a/risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go
+++ b/pkg/security/risks/builtin/missing-build-infrastructure-rule.go
@@ -1,11 +1,17 @@
-package missing_build_infrastructure
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingBuildInfrastructureRule struct{}
+
+func NewMissingBuildInfrastructureRule() *MissingBuildInfrastructureRule {
+ return &MissingBuildInfrastructureRule{}
+}
+
+func (*MissingBuildInfrastructureRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-build-infrastructure",
Title: "Missing Build Infrastructure",
Description: "The modeled architecture does not contain a build infrastructure (devops-client, sourcecode-repo, build-pipeline, etc.), " +
@@ -19,8 +25,8 @@ func Category() model.RiskCategory {
Action: "Build Pipeline Hardening",
Mitigation: "Include the build infrastructure in the model.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Tampering,
+ Function: types.Architecture,
+ STRIDE: types.Tampering,
DetectionLogic: "Models with in-scope custom-developed parts missing in-scope development (code creation) and build infrastructure " +
"components (devops-client, sourcecode-repo, build-pipeline, etc.).",
RiskAssessment: "The risk rating depends on the highest sensitivity of the in-scope assets running custom-developed parts.",
@@ -31,66 +37,66 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MissingBuildInfrastructureRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
+func (r *MissingBuildInfrastructureRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
hasCustomDevelopedParts, hasBuildPipeline, hasSourcecodeRepo, hasDevOpsClient := false, false, false, false
- impact := model.LowImpact
- var mostRelevantAsset model.TechnicalAsset
- for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+ impact := types.LowImpact
+ var mostRelevantAsset types.TechnicalAsset
+ for _, id := range input.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.CustomDevelopedParts && !technicalAsset.OutOfScope {
hasCustomDevelopedParts = true
- if impact == model.LowImpact {
+ if impact == types.LowImpact {
mostRelevantAsset = technicalAsset
- if technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical ||
- technicalAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(input) >= types.Critical ||
+ technicalAsset.HighestAvailability(input) >= types.Critical {
+ impact = types.MediumImpact
}
}
- if technicalAsset.Confidentiality >= model.Confidential ||
- technicalAsset.Integrity >= model.Critical ||
- technicalAsset.Availability >= model.Critical {
- impact = model.MediumImpact
+ if technicalAsset.Confidentiality >= types.Confidential ||
+ technicalAsset.Integrity >= types.Critical ||
+ technicalAsset.Availability >= types.Critical {
+ impact = types.MediumImpact
}
// just for referencing the most interesting asset
if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
mostRelevantAsset = technicalAsset
}
}
- if technicalAsset.Technology == model.BuildPipeline {
+ if technicalAsset.Technology == types.BuildPipeline {
hasBuildPipeline = true
}
- if technicalAsset.Technology == model.SourcecodeRepository {
+ if technicalAsset.Technology == types.SourcecodeRepository {
hasSourcecodeRepo = true
}
- if technicalAsset.Technology == model.DevOpsClient {
+ if technicalAsset.Technology == types.DevOpsClient {
hasDevOpsClient = true
}
}
hasBuildInfrastructure := hasBuildPipeline && hasSourcecodeRepo && hasDevOpsClient
if hasCustomDevelopedParts && !hasBuildInfrastructure {
- risks = append(risks, createRisk(mostRelevantAsset, impact))
+ risks = append(risks, r.createRisk(mostRelevantAsset, impact))
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk {
+func (r *MissingBuildInfrastructureRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk {
title := "Missing Build Infrastructure in the threat model (referencing asset " + technicalAsset.Title + " as an example)"
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go b/pkg/security/risks/builtin/missing-cloud-hardening-rule.go
similarity index 57%
rename from risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go
rename to pkg/security/risks/builtin/missing-cloud-hardening-rule.go
index e7dddb3a..b6f6b06a 100644
--- a/risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go
+++ b/pkg/security/risks/builtin/missing-cloud-hardening-rule.go
@@ -1,12 +1,19 @@
-package missing_cloud_hardening
+package builtin
import (
- "github.com/threagile/threagile/model"
"sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingCloudHardeningRule struct{}
+
+func NewMissingCloudHardeningRule() *MissingCloudHardeningRule {
+ return &MissingCloudHardeningRule{}
+}
+
+func (*MissingCloudHardeningRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-cloud-hardening",
Title: "Missing Cloud Hardening",
Description: "Cloud components should be hardened according to the cloud vendor best practices. This affects their " +
@@ -25,10 +32,10 @@ func Category() model.RiskCategory {
"
For Google Cloud Platform: Follow the CIS Benchmark for Google Cloud Computing Platform (see also the automated checks of cloud audit tools like \"CloudSploit\" or \"ScoutSuite\"). " +
"
For Oracle Cloud Platform: Follow the hardening best practices (see also the automated checks of cloud audit tools like \"CloudSploit\").",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.Tampering,
+ Function: types.Operations,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope cloud components (either residing in cloud trust boundaries or more specifically tagged with cloud provider types).",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Cloud components not running parts of the target architecture can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -36,59 +43,59 @@ func Category() model.RiskCategory {
}
}
-var specificSubtagsAWS = []string{"aws:vpc", "aws:ec2", "aws:s3", "aws:ebs", "aws:apigateway", "aws:lambda", "aws:dynamodb", "aws:rds", "aws:sqs", "aws:iam"}
+var specificSubTagsAWS = []string{"aws:vpc", "aws:ec2", "aws:s3", "aws:ebs", "aws:apigateway", "aws:lambda", "aws:dynamodb", "aws:rds", "aws:sqs", "aws:iam"}
-func SupportedTags() []string {
+func (*MissingCloudHardeningRule) SupportedTags() []string {
res := []string{
"aws", // Amazon AWS
"azure", // Microsoft Azure
"gcp", // Google Cloud Platform
"ocp", // Oracle Cloud Platform
}
- res = append(res, specificSubtagsAWS...)
+ res = append(res, specificSubTagsAWS...)
return res
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
+func (r *MissingCloudHardeningRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
- sharedRuntimesWithUnspecificCloudRisks := make(map[string]bool, 0)
- trustBoundariesWithUnspecificCloudRisks := make(map[string]bool, 0)
- techAssetsWithUnspecificCloudRisks := make(map[string]bool, 0)
+ sharedRuntimesWithUnspecificCloudRisks := make(map[string]bool)
+ trustBoundariesWithUnspecificCloudRisks := make(map[string]bool)
+ techAssetsWithUnspecificCloudRisks := make(map[string]bool)
- sharedRuntimeIDsAWS := make(map[string]bool, 0)
- trustBoundaryIDsAWS := make(map[string]bool, 0)
- techAssetIDsAWS := make(map[string]bool, 0)
+ sharedRuntimeIDsAWS := make(map[string]bool)
+ trustBoundaryIDsAWS := make(map[string]bool)
+ techAssetIDsAWS := make(map[string]bool)
- sharedRuntimeIDsAzure := make(map[string]bool, 0)
- trustBoundaryIDsAzure := make(map[string]bool, 0)
- techAssetIDsAzure := make(map[string]bool, 0)
+ sharedRuntimeIDsAzure := make(map[string]bool)
+ trustBoundaryIDsAzure := make(map[string]bool)
+ techAssetIDsAzure := make(map[string]bool)
- sharedRuntimeIDsGCP := make(map[string]bool, 0)
- trustBoundaryIDsGCP := make(map[string]bool, 0)
- techAssetIDsGCP := make(map[string]bool, 0)
+ sharedRuntimeIDsGCP := make(map[string]bool)
+ trustBoundaryIDsGCP := make(map[string]bool)
+ techAssetIDsGCP := make(map[string]bool)
- sharedRuntimeIDsOCP := make(map[string]bool, 0)
- trustBoundaryIDsOCP := make(map[string]bool, 0)
- techAssetIDsOCP := make(map[string]bool, 0)
+ sharedRuntimeIDsOCP := make(map[string]bool)
+ trustBoundaryIDsOCP := make(map[string]bool)
+ techAssetIDsOCP := make(map[string]bool)
- techAssetIDsWithSubtagSpecificCloudRisks := make(map[string]bool, 0)
+ techAssetIDsWithSubtagSpecificCloudRisks := make(map[string]bool)
- for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries {
- taggedOuterTB := trustBoundary.IsTaggedWithAny(SupportedTags()...) // false = generic cloud risks only // true = cloud-individual risks
+ for _, trustBoundary := range input.TrustBoundaries {
+ taggedOuterTB := trustBoundary.IsTaggedWithAny(r.SupportedTags()...) // false = generic cloud risks only // true = cloud-individual risks
if taggedOuterTB || trustBoundary.Type.IsWithinCloud() {
- addTrustBoundaryAccordingToBasetag(trustBoundary, trustBoundariesWithUnspecificCloudRisks,
+ r.addTrustBoundaryAccordingToBaseTag(trustBoundary, trustBoundariesWithUnspecificCloudRisks,
trustBoundaryIDsAWS, trustBoundaryIDsAzure, trustBoundaryIDsGCP, trustBoundaryIDsOCP)
- for _, techAssetID := range trustBoundary.RecursivelyAllTechnicalAssetIDsInside() {
+ for _, techAssetID := range trustBoundary.RecursivelyAllTechnicalAssetIDsInside(input) {
added := false
- tA := model.ParsedModelRoot.TechnicalAssets[techAssetID]
- if tA.IsTaggedWithAny(SupportedTags()...) {
- addAccordingToBasetag(tA, tA.Tags,
+ tA := input.TechnicalAssets[techAssetID]
+ if tA.IsTaggedWithAny(r.SupportedTags()...) {
+ addAccordingToBaseTag(tA, tA.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
added = true
} else if taggedOuterTB {
- addAccordingToBasetag(tA, trustBoundary.Tags,
+ addAccordingToBaseTag(tA, trustBoundary.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
added = true
@@ -101,31 +108,31 @@ func GenerateRisks() []model.Risk {
}
// now loop over all technical assets, trust boundaries, and shared runtimes model-wide by tag
- for _, tA := range model.TechnicalAssetsTaggedWithAny(SupportedTags()...) {
- addAccordingToBasetag(tA, tA.Tags,
+ for _, tA := range input.TechnicalAssetsTaggedWithAny(r.SupportedTags()...) {
+ addAccordingToBaseTag(tA, tA.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
}
- for _, tB := range model.TrustBoundariesTaggedWithAny(SupportedTags()...) {
- for _, candidateID := range tB.RecursivelyAllTechnicalAssetIDsInside() {
- tA := model.ParsedModelRoot.TechnicalAssets[candidateID]
- if tA.IsTaggedWithAny(SupportedTags()...) {
- addAccordingToBasetag(tA, tA.Tags,
+ for _, tB := range input.TrustBoundariesTaggedWithAny(r.SupportedTags()...) {
+ for _, candidateID := range tB.RecursivelyAllTechnicalAssetIDsInside(input) {
+ tA := input.TechnicalAssets[candidateID]
+ if tA.IsTaggedWithAny(r.SupportedTags()...) {
+ addAccordingToBaseTag(tA, tA.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
} else {
- addAccordingToBasetag(tA, tB.Tags,
+ addAccordingToBaseTag(tA, tB.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
}
}
}
- for _, sR := range model.SharedRuntimesTaggedWithAny(SupportedTags()...) {
- addSharedRuntimeAccordingToBasetag(sR, sharedRuntimesWithUnspecificCloudRisks,
+ for _, sR := range input.SharedRuntimesTaggedWithAny(r.SupportedTags()...) {
+ r.addSharedRuntimeAccordingToBaseTag(sR, sharedRuntimesWithUnspecificCloudRisks,
sharedRuntimeIDsAWS, sharedRuntimeIDsAzure, sharedRuntimeIDsGCP, sharedRuntimeIDsOCP)
for _, candidateID := range sR.TechnicalAssetsRunning {
- tA := model.ParsedModelRoot.TechnicalAssets[candidateID]
- addAccordingToBasetag(tA, sR.Tags,
+ tA := input.TechnicalAssets[candidateID]
+ addAccordingToBaseTag(tA, sR.Tags,
techAssetIDsWithSubtagSpecificCloudRisks,
techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
}
@@ -181,87 +188,87 @@ func GenerateRisks() []model.Risk {
// first try to add shared runtimes...
for id := range sharedRuntimeIDsAWS {
- risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "AWS", "CIS Benchmark for AWS"))
+ risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "AWS", "CIS Benchmark for AWS"))
addedAWS = true
}
for id := range sharedRuntimeIDsAzure {
- risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "Azure", "CIS Benchmark for Microsoft Azure"))
+ risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "Azure", "CIS Benchmark for Microsoft Azure"))
addedAzure = true
}
for id := range sharedRuntimeIDsGCP {
- risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
+ risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
addedGCP = true
}
for id := range sharedRuntimeIDsOCP {
- risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
+ risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
addedOCP = true
}
for id := range sharedRuntimesWithUnspecificCloudRisks {
- risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "", ""))
+ risks = append(risks, r.createRiskForSharedRuntime(input, input.SharedRuntimes[id], "", ""))
}
// ... followed by trust boundaries for the generic risks
for id := range trustBoundaryIDsAWS {
- risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "AWS", "CIS Benchmark for AWS"))
+ risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "AWS", "CIS Benchmark for AWS"))
addedAWS = true
}
for id := range trustBoundaryIDsAzure {
- risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "Azure", "CIS Benchmark for Microsoft Azure"))
+ risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "Azure", "CIS Benchmark for Microsoft Azure"))
addedAzure = true
}
for id := range trustBoundaryIDsGCP {
- risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
+ risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
addedGCP = true
}
for id := range trustBoundaryIDsOCP {
- risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
+ risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
addedOCP = true
}
for id := range trustBoundariesWithUnspecificCloudRisks {
- risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "", ""))
+ risks = append(risks, r.createRiskForTrustBoundary(input, input.TrustBoundaries[id], "", ""))
}
// just use the most sensitive asset as an example - to only create one general "AWS cloud hardening" risk, not many
if !addedAWS {
- mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAWS)
+ mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsAWS)
if !mostRelevantAsset.IsZero() {
- risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "AWS", "CIS Benchmark for AWS"))
+ risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "AWS", "CIS Benchmark for AWS"))
addedAWS = true
}
}
// just use the most sensitive asset as an example - to only create one general "Azure cloud hardening" risk, not many
if !addedAzure {
- mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAzure)
+ mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsAzure)
if !mostRelevantAsset.IsZero() {
- risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "Azure", "CIS Benchmark for Microsoft Azure"))
+ risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "Azure", "CIS Benchmark for Microsoft Azure"))
addedAzure = true
}
}
// just use the most sensitive asset as an example - to only create one general "GCP cloud hardening" risk, not many
if !addedGCP {
- mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsGCP)
+ mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsGCP)
if !mostRelevantAsset.IsZero() {
- risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
+ risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
addedGCP = true
}
}
// just use the most sensitive asset as an example - to only create one general "GCP cloud hardening" risk, not many
if !addedOCP {
- mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsOCP)
+ mostRelevantAsset := findMostSensitiveTechnicalAsset(input, techAssetIDsOCP)
if !mostRelevantAsset.IsZero() {
- risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
+ risks = append(risks, r.createRiskForTechnicalAsset(input, mostRelevantAsset, "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
addedOCP = true
}
}
// now also add all tech asset specific tag-specific risks, as they are specific to the asset anyway (therefore don't set added to true here)
for id := range techAssetIDsWithSubtagSpecificCloudRisks {
- tA := model.ParsedModelRoot.TechnicalAssets[id]
- if tA.IsTaggedWithAnyTraversingUp("aws:ec2") {
- risks = append(risks, createRiskForTechnicalAsset(tA, "EC2", "CIS Benchmark for Amazon Linux"))
+ tA := input.TechnicalAssets[id]
+ if tA.IsTaggedWithAnyTraversingUp(input, "aws:ec2") {
+ risks = append(risks, r.createRiskForTechnicalAsset(input, tA, "EC2", "CIS Benchmark for Amazon Linux"))
}
- if tA.IsTaggedWithAnyTraversingUp("aws:s3") {
- risks = append(risks, createRiskForTechnicalAsset(tA, "S3", "Security Best Practices for AWS S3"))
+ if tA.IsTaggedWithAnyTraversingUp(input, "aws:s3") {
+ risks = append(risks, r.createRiskForTechnicalAsset(input, tA, "S3", "Security Best Practices for AWS S3"))
}
// TODO add more tag-specific risks like also for aws:lambda etc. here
}
@@ -269,13 +276,13 @@ func GenerateRisks() []model.Risk {
return risks
}
-func addTrustBoundaryAccordingToBasetag(trustBoundary model.TrustBoundary,
+func (r *MissingCloudHardeningRule) addTrustBoundaryAccordingToBaseTag(trustBoundary types.TrustBoundary,
trustBoundariesWithUnspecificCloudRisks map[string]bool,
trustBoundaryIDsAWS map[string]bool,
trustBoundaryIDsAzure map[string]bool,
trustBoundaryIDsGCP map[string]bool,
trustBoundaryIDsOCP map[string]bool) {
- if trustBoundary.IsTaggedWithAny(SupportedTags()...) {
+ if trustBoundary.IsTaggedWithAny(r.SupportedTags()...) {
if trustBoundary.IsTaggedWithBaseTag("aws") {
trustBoundaryIDsAWS[trustBoundary.Id] = true
}
@@ -293,13 +300,13 @@ func addTrustBoundaryAccordingToBasetag(trustBoundary model.TrustBoundary,
}
}
-func addSharedRuntimeAccordingToBasetag(sharedRuntime model.SharedRuntime,
+func (r *MissingCloudHardeningRule) addSharedRuntimeAccordingToBaseTag(sharedRuntime types.SharedRuntime,
sharedRuntimesWithUnspecificCloudRisks map[string]bool,
sharedRuntimeIDsAWS map[string]bool,
sharedRuntimeIDsAzure map[string]bool,
sharedRuntimeIDsGCP map[string]bool,
sharedRuntimeIDsOCP map[string]bool) {
- if sharedRuntime.IsTaggedWithAny(SupportedTags()...) {
+ if sharedRuntime.IsTaggedWithAny(r.SupportedTags()...) {
if sharedRuntime.IsTaggedWithBaseTag("aws") {
sharedRuntimeIDsAWS[sharedRuntime.Id] = true
}
@@ -317,38 +324,38 @@ func addSharedRuntimeAccordingToBasetag(sharedRuntime model.SharedRuntime,
}
}
-func addAccordingToBasetag(techAsset model.TechnicalAsset, tags []string,
+func addAccordingToBaseTag(techAsset types.TechnicalAsset, tags []string,
techAssetIDsWithTagSpecificCloudRisks map[string]bool,
techAssetIDsAWS map[string]bool,
techAssetIDsAzure map[string]bool,
techAssetIDsGCP map[string]bool,
techAssetIDsOCP map[string]bool) {
- if techAsset.IsTaggedWithAny(specificSubtagsAWS...) {
+ if techAsset.IsTaggedWithAny(specificSubTagsAWS...) {
techAssetIDsWithTagSpecificCloudRisks[techAsset.Id] = true
}
- if model.IsTaggedWithBaseTag(tags, "aws") {
+ if types.IsTaggedWithBaseTag(tags, "aws") {
techAssetIDsAWS[techAsset.Id] = true
}
- if model.IsTaggedWithBaseTag(tags, "azure") {
+ if types.IsTaggedWithBaseTag(tags, "azure") {
techAssetIDsAzure[techAsset.Id] = true
}
- if model.IsTaggedWithBaseTag(tags, "gcp") {
+ if types.IsTaggedWithBaseTag(tags, "gcp") {
techAssetIDsGCP[techAsset.Id] = true
}
- if model.IsTaggedWithBaseTag(tags, "ocp") {
+ if types.IsTaggedWithBaseTag(tags, "ocp") {
techAssetIDsOCP[techAsset.Id] = true
}
}
-func findMostSensitiveTechnicalAsset(techAssets map[string]bool) model.TechnicalAsset {
- var mostRelevantAsset model.TechnicalAsset
+func findMostSensitiveTechnicalAsset(input *types.ParsedModel, techAssets map[string]bool) types.TechnicalAsset {
+ var mostRelevantAsset types.TechnicalAsset
keys := make([]string, 0, len(techAssets))
for k := range techAssets {
keys = append(keys, k)
}
sort.Strings(keys)
for _, id := range keys {
- tA := model.ParsedModelRoot.TechnicalAssets[id]
+ tA := input.TechnicalAssets[id]
if mostRelevantAsset.IsZero() || tA.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
mostRelevantAsset = tA
}
@@ -356,7 +363,7 @@ func findMostSensitiveTechnicalAsset(techAssets map[string]bool) model.Technical
return mostRelevantAsset
}
-func createRiskForSharedRuntime(sharedRuntime model.SharedRuntime, prefix, details string) model.Risk {
+func (r *MissingCloudHardeningRule) createRiskForSharedRuntime(input *types.ParsedModel, sharedRuntime types.SharedRuntime, prefix, details string) types.Risk {
if len(prefix) > 0 {
prefix = " (" + prefix + ")"
}
@@ -364,33 +371,33 @@ func createRiskForSharedRuntime(sharedRuntime model.SharedRuntime, prefix, detai
if len(details) > 0 {
title += ": " + details + ""
}
- impact := model.MediumImpact
- if sharedRuntime.HighestConfidentiality() >= model.Confidential ||
- sharedRuntime.HighestIntegrity() >= model.Critical ||
- sharedRuntime.HighestAvailability() >= model.Critical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if sharedRuntime.HighestConfidentiality(input) >= types.Confidential ||
+ sharedRuntime.HighestIntegrity(input) >= types.Critical ||
+ sharedRuntime.HighestAvailability(input) >= types.Critical {
+ impact = types.HighImpact
}
- if sharedRuntime.HighestConfidentiality() == model.StrictlyConfidential ||
- sharedRuntime.HighestIntegrity() == model.MissionCritical ||
- sharedRuntime.HighestAvailability() == model.MissionCritical {
- impact = model.VeryHighImpact
+ if sharedRuntime.HighestConfidentiality(input) == types.StrictlyConfidential ||
+ sharedRuntime.HighestIntegrity(input) == types.MissionCritical ||
+ sharedRuntime.HighestAvailability(input) == types.MissionCritical {
+ impact = types.VeryHighImpact
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantSharedRuntimeId: sharedRuntime.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: sharedRuntime.TechnicalAssetsRunning,
}
- risk.SyntheticId = risk.Category.Id + "@" + sharedRuntime.Id
+ risk.SyntheticId = risk.CategoryId + "@" + sharedRuntime.Id
return risk
}
-func createRiskForTrustBoundary(trustBoundary model.TrustBoundary, prefix, details string) model.Risk {
+func (r *MissingCloudHardeningRule) createRiskForTrustBoundary(parsedModel *types.ParsedModel, trustBoundary types.TrustBoundary, prefix, details string) types.Risk {
if len(prefix) > 0 {
prefix = " (" + prefix + ")"
}
@@ -398,33 +405,33 @@ func createRiskForTrustBoundary(trustBoundary model.TrustBoundary, prefix, detai
if len(details) > 0 {
title += ": " + details + ""
}
- impact := model.MediumImpact
- if trustBoundary.HighestConfidentiality() >= model.Confidential ||
- trustBoundary.HighestIntegrity() >= model.Critical ||
- trustBoundary.HighestAvailability() >= model.Critical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if trustBoundary.HighestConfidentiality(parsedModel) >= types.Confidential ||
+ trustBoundary.HighestIntegrity(parsedModel) >= types.Critical ||
+ trustBoundary.HighestAvailability(parsedModel) >= types.Critical {
+ impact = types.HighImpact
}
- if trustBoundary.HighestConfidentiality() == model.StrictlyConfidential ||
- trustBoundary.HighestIntegrity() == model.MissionCritical ||
- trustBoundary.HighestAvailability() == model.MissionCritical {
- impact = model.VeryHighImpact
+ if trustBoundary.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ trustBoundary.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ trustBoundary.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.VeryHighImpact
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTrustBoundaryId: trustBoundary.Id,
- DataBreachProbability: model.Probable,
- DataBreachTechnicalAssetIDs: trustBoundary.RecursivelyAllTechnicalAssetIDsInside(),
+ DataBreachProbability: types.Probable,
+ DataBreachTechnicalAssetIDs: trustBoundary.RecursivelyAllTechnicalAssetIDsInside(parsedModel),
}
- risk.SyntheticId = risk.Category.Id + "@" + trustBoundary.Id
+ risk.SyntheticId = risk.CategoryId + "@" + trustBoundary.Id
return risk
}
-func createRiskForTechnicalAsset(technicalAsset model.TechnicalAsset, prefix, details string) model.Risk {
+func (r *MissingCloudHardeningRule) createRiskForTechnicalAsset(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, prefix, details string) types.Risk {
if len(prefix) > 0 {
prefix = " (" + prefix + ")"
}
@@ -432,28 +439,28 @@ func createRiskForTechnicalAsset(technicalAsset model.TechnicalAsset, prefix, de
if len(details) > 0 {
title += ": " + details + ""
}
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical ||
- technicalAsset.HighestAvailability() >= model.Critical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(parsedModel) >= types.Critical ||
+ technicalAsset.HighestAvailability(parsedModel) >= types.Critical {
+ impact = types.HighImpact
}
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.VeryHighImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.VeryHighImpact
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/missing-file-validation/missing-file-validation-rule.go b/pkg/security/risks/builtin/missing-file-validation-rule.go
similarity index 55%
rename from risks/built-in/missing-file-validation/missing-file-validation-rule.go
rename to pkg/security/risks/builtin/missing-file-validation-rule.go
index c8633038..2a7c388d 100644
--- a/risks/built-in/missing-file-validation/missing-file-validation-rule.go
+++ b/pkg/security/risks/builtin/missing-file-validation-rule.go
@@ -1,11 +1,17 @@
-package missing_file_validation
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingFileValidationRule struct{}
+
+func NewMissingFileValidationRule() *MissingFileValidationRule {
+ return &MissingFileValidationRule{}
+}
+
+func (*MissingFileValidationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-file-validation",
Title: "Missing File Validation",
Description: "When a technical asset accepts files, these input files should be strictly validated about filename and type.",
@@ -15,14 +21,14 @@ func Category() model.RiskCategory {
Action: "File Validation",
Mitigation: "Filter by file extension and discard (if feasible) the name provided. Whitelist the accepted file types " +
"and determine the mime-type on the server-side (for example via \"Apache Tika\" or similar checks). If the file is retrievable by " +
- "endusers and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they " +
+ "end users and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they " +
"were uploaded, also apply a fresh malware scan during retrieval to scan with newer signatures of popular malware). Also enforce " +
"limits on maximum file size to avoid denial-of-service like scenarios.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Spoofing,
+ Function: types.Development,
+ STRIDE: types.Spoofing,
DetectionLogic: "In-scope technical assets with custom-developed code accepting file data formats.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Fully trusted (i.e. cryptographically signed or similar) files can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -30,44 +36,44 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MissingFileValidationRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *MissingFileValidationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.OutOfScope || !technicalAsset.CustomDevelopedParts {
continue
}
for _, format := range technicalAsset.DataFormatsAccepted {
- if format == model.File {
- risks = append(risks, createRisk(technicalAsset))
+ if format == types.File {
+ risks = append(risks, r.createRisk(input, technicalAsset))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *MissingFileValidationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "Missing File Validation risk at " + technicalAsset.Title + ""
- impact := model.LowImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.MediumImpact
+ impact := types.LowImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(input) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(input) == types.MissionCritical {
+ impact = types.MediumImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.VeryLikely, impact),
- ExploitationLikelihood: model.VeryLikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.VeryLikely, impact),
+ ExploitationLikelihood: types.VeryLikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/pkg/security/risks/builtin/missing-hardening-rule.go b/pkg/security/risks/builtin/missing-hardening-rule.go
new file mode 100644
index 00000000..60d2f6e5
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-hardening-rule.go
@@ -0,0 +1,78 @@
+package builtin
+
+import (
+ "strconv"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingHardeningRule struct {
+ raaLimit int
+ raaLimitReduced int
+}
+
+func NewMissingHardeningRule() *MissingHardeningRule {
+ return &MissingHardeningRule{raaLimit: 55, raaLimitReduced: 40}
+}
+
+func (r *MissingHardeningRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-hardening",
+ Title: "Missing Hardening",
+ Description: "Technical assets with a Relative Attacker Attractiveness (RAA) value of " + strconv.Itoa(r.raaLimit) + " % or higher should be " +
+ "explicitly hardened taking best practices and vendor hardening guides into account.",
+ Impact: "If this risk remains unmitigated, attackers might be able to attack high-value targets more easily.",
+ ASVS: "V14 - Configuration Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ Action: "System Hardening",
+ Mitigation: "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor " +
+ "recommendations, DevSec Hardening Framework, DBSAT for Oracle databases, and others).",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Operations,
+ STRIDE: types.Tampering,
+ DetectionLogic: "In-scope technical assets with RAA values of " + strconv.Itoa(r.raaLimit) + " % or higher. " +
+ "Generally for high-value targets like data stores, application servers, identity providers and ERP systems this limit is reduced to " + strconv.Itoa(r.raaLimitReduced) + " %",
+ RiskAssessment: "The risk rating depends on the sensitivity of the data processed in the technical asset.",
+ FalsePositives: "Usually no false positives.",
+ ModelFailurePossibleReason: false,
+ CWE: 16,
+ }
+}
+
+func (*MissingHardeningRule) SupportedTags() []string {
+ return []string{"tomcat"}
+}
+
+func (r *MissingHardeningRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope {
+ if technicalAsset.RAA >= float64(r.raaLimit) || (technicalAsset.RAA >= float64(r.raaLimitReduced) &&
+ (technicalAsset.Type == types.Datastore || technicalAsset.Technology == types.ApplicationServer || technicalAsset.Technology == types.IdentityProvider || technicalAsset.Technology == types.ERP)) {
+ risks = append(risks, r.createRisk(input, technicalAsset))
+ }
+ }
+ }
+ return risks
+}
+
+func (r *MissingHardeningRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
+ title := "Missing Hardening risk at " + technicalAsset.Title + ""
+ impact := types.LowImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical {
+ impact = types.MediumImpact
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Likely, impact),
+ ExploitationLikelihood: types.Likely,
+ ExploitationImpact: impact,
+ Title: title,
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/pkg/security/risks/builtin/missing-identity-propagation-rule.go b/pkg/security/risks/builtin/missing-identity-propagation-rule.go
new file mode 100644
index 00000000..03be30d1
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-identity-propagation-rule.go
@@ -0,0 +1,106 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingIdentityPropagationRule struct{}
+
+func NewMissingIdentityPropagationRule() *MissingIdentityPropagationRule {
+ return &MissingIdentityPropagationRule{}
+}
+
+func (*MissingIdentityPropagationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-identity-propagation",
+ Title: "Missing Identity Propagation",
+ Description: "Technical assets (especially multi-tenant systems), which usually process data for end users should " +
+ "authorize every request based on the identity of the end user when the data flow is authenticated (i.e. non-public). " +
+ "For DevOps usages at least a technical-user authorization is required.",
+ Impact: "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within " +
+ "the system due to missing resource-based authorization checks.",
+ ASVS: "V4 - Access Control Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
+ Action: "Identity Propagation and Resource-based Authorization",
+ Mitigation: "When processing requests for end users if possible authorize in the backend against the propagated " +
+ "identity of the end user. This can be achieved in passing JWTs or similar tokens and checking them in the backend " +
+ "services. For DevOps usages apply at least a technical-user authorization.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope service-like technical assets which usually process data based on end user requests, if authenticated " +
+ "(i.e. non-public), should authorize incoming requests based on the propagated end user identity when their rating is sensitive. " +
+ "This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). " +
+ "DevOps usages are exempted from this risk.",
+ RiskAssessment: "The risk rating (medium or high) " +
+ "depends on the confidentiality, integrity, and availability rating of the technical asset.",
+ FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
+ "can be considered as false positives after individual review.",
+ ModelFailurePossibleReason: false,
+ CWE: 284,
+ }
+}
+
+func (*MissingIdentityPropagationRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingIdentityPropagationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.OutOfScope {
+ continue
+ }
+ if technicalAsset.Technology.IsUsuallyProcessingEndUserRequests() &&
+ (technicalAsset.Confidentiality >= types.Confidential ||
+ technicalAsset.Integrity >= types.Critical ||
+ technicalAsset.Availability >= types.Critical ||
+ (technicalAsset.MultiTenant &&
+ (technicalAsset.Confidentiality >= types.Restricted ||
+ technicalAsset.Integrity >= types.Important ||
+ technicalAsset.Availability >= types.Important))) {
+ // check each incoming authenticated data flow
+ commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ for _, commLink := range commLinks {
+ caller := input.TechnicalAssets[commLink.SourceId]
+ if !caller.Technology.IsUsuallyAbleToPropagateIdentityToOutgoingTargets() || caller.Type == types.Datastore {
+ continue
+ }
+ if commLink.Authentication != types.NoneAuthentication &&
+ commLink.Authorization != types.EndUserIdentityPropagation {
+ if commLink.Usage == types.DevOps && commLink.Authorization != types.NoneAuthorization {
+ continue
+ }
+ highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical ||
+ technicalAsset.Availability == types.MissionCritical
+ risks = append(risks, r.createRisk(input, technicalAsset, commLink, highRisk))
+ }
+ }
+ }
+ }
+ return risks
+}
+
+func (r *MissingIdentityPropagationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink, moreRisky bool) types.Risk {
+ impact := types.LowImpact
+ if moreRisky {
+ impact = types.MediumImpact
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: "Missing End User Identity Propagation over communication link " + incomingAccess.Title + " " +
+ "from " + input.TechnicalAssets[incomingAccess.SourceId].Title + " " +
+ "to " + technicalAsset.Title + "",
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ MostRelevantCommunicationLinkId: incomingAccess.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + incomingAccess.Id + "@" + input.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go b/pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go
similarity index 54%
rename from risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go
rename to pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go
index acd44afd..1d47be95 100644
--- a/risks/built-in/missing-identity-provider-isolation/missing-identity-provider-isolation-rule.go
+++ b/pkg/security/risks/builtin/missing-identity-provider-isolation-rule.go
@@ -1,29 +1,35 @@
-package missing_identity_provider_isolation
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingIdentityProviderIsolationRule struct{}
+
+func NewMissingIdentityProviderIsolationRule() *MissingIdentityProviderIsolationRule {
+ return &MissingIdentityProviderIsolationRule{}
+}
+
+func (*MissingIdentityProviderIsolationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-identity-provider-isolation",
Title: "Missing Identity Provider Isolation",
- Description: "Highly sensitive identity provider assets and their identity datastores should be isolated from other assets " +
- "by their own network segmentation trust-boundary (" + model.ExecutionEnvironment.String() + " boundaries do not count as network isolation).",
+ Description: "Highly sensitive identity provider assets and their identity data stores should be isolated from other assets " +
+ "by their own network segmentation trust-boundary (" + types.ExecutionEnvironment.String() + " boundaries do not count as network isolation).",
Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " +
- "highly sensitive identity provider assets and their identity datastores, as they are not separated by network segmentation.",
+ "highly sensitive identity provider assets and their identity data stores, as they are not separated by network segmentation.",
ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
Action: "Network Segmentation",
- Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity datastores.",
+ Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity data stores.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope identity provider assets and their identity datastores " +
+ Function: types.Operations,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope identity provider assets and their identity data stores " +
"when surrounded by other (not identity-related) assets (without a network trust-boundary in-between). " +
"This risk is especially prevalent when other non-identity related assets are within the same execution environment (i.e. same database or same application server).",
- RiskAssessment: "Default is " + model.HighImpact.String() + " impact. The impact is increased to " + model.VeryHighImpact.String() + " when the asset missing the " +
- "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".",
+ RiskAssessment: "Default is " + types.HighImpact.String() + " impact. The impact is increased to " + types.VeryHighImpact.String() + " when the asset missing the " +
+ "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".",
 FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were " +
"identity providers with data of highest sensitivity.",
ModelFailurePossibleReason: false,
@@ -31,63 +37,63 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MissingIdentityProviderIsolationRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
+func (r *MissingIdentityProviderIsolationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
if !technicalAsset.OutOfScope && technicalAsset.Technology.IsIdentityRelated() {
- moreImpact := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical ||
- technicalAsset.Availability == model.MissionCritical
+ moreImpact := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical ||
+ technicalAsset.Availability == types.MissionCritical
sameExecutionEnv := false
createRiskEntry := false
// now check for any other same-network assets of non-identity-related types
- for sparringAssetCandidateId, _ := range model.ParsedModelRoot.TechnicalAssets { // so inner loop again over all assets
+ for sparringAssetCandidateId := range input.TechnicalAssets { // so inner loop again over all assets
if technicalAsset.Id != sparringAssetCandidateId {
- sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId]
+ sparringAssetCandidate := input.TechnicalAssets[sparringAssetCandidateId]
if !sparringAssetCandidate.Technology.IsIdentityRelated() && !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() {
- if technicalAsset.IsSameExecutionEnvironment(sparringAssetCandidateId) {
+ if technicalAsset.IsSameExecutionEnvironment(input, sparringAssetCandidateId) {
createRiskEntry = true
sameExecutionEnv = true
- } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) {
+ } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) {
createRiskEntry = true
}
}
}
}
if createRiskEntry {
- risks = append(risks, createRisk(technicalAsset, moreImpact, sameExecutionEnv))
+ risks = append(risks, r.createRisk(technicalAsset, moreImpact, sameExecutionEnv))
}
}
}
return risks
}
-func createRisk(techAsset model.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) model.Risk {
- impact := model.HighImpact
- likelihood := model.Unlikely
+func (r *MissingIdentityProviderIsolationRule) createRisk(techAsset types.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) types.Risk {
+ impact := types.HighImpact
+ likelihood := types.Unlikely
others := "in the same network segment"
if moreImpact {
- impact = model.VeryHighImpact
+ impact = types.VeryHighImpact
}
if sameExecutionEnv {
- likelihood = model.Likely
+ likelihood = types.Likely
others = "in the same execution environment"
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: "Missing Identity Provider Isolation to further encapsulate and protect identity-related asset " + techAsset.Title + " against unrelated " +
"lower protected assets " + others + ", which might be easier to compromise by attackers",
MostRelevantTechnicalAssetId: techAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{techAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id
return risk
}
diff --git a/pkg/security/risks/builtin/missing-identity-store-rule.go b/pkg/security/risks/builtin/missing-identity-store-rule.go
new file mode 100644
index 00000000..9bdb1d2d
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-identity-store-rule.go
@@ -0,0 +1,101 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingIdentityStoreRule struct{}
+
+func NewMissingIdentityStoreRule() *MissingIdentityStoreRule {
+ return &MissingIdentityStoreRule{}
+}
+
+func (*MissingIdentityStoreRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-identity-store",
+ Title: "Missing Identity Store",
+ Description: "The modeled architecture does not contain an identity store, which might be the risk of a model missing " +
+ "critical assets (and thus not seeing their risks).",
+ Impact: "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store " +
+ "that is currently missing in the model.",
+ ASVS: "V2 - Authentication Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
+ Action: "Identity Store",
+ Mitigation: "Include an identity store in the model if the application has a login.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.Spoofing,
+ DetectionLogic: "Models with authenticated data-flows authorized via end user identity missing an in-scope identity store.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the end user-identity authorized technical assets and " +
+ "their data assets processed.",
+ FalsePositives: "Models only offering data/services without any real authentication need " +
+ "can be considered as false positives after individual review.",
+ ModelFailurePossibleReason: true,
+ CWE: 287,
+ }
+}
+
+func (*MissingIdentityStoreRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingIdentityStoreRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
+ if !technicalAsset.OutOfScope &&
+ (technicalAsset.Technology == types.IdentityStoreLDAP || technicalAsset.Technology == types.IdentityStoreDatabase) {
+ // everything fine, no risk, as we have an in-scope identity store in the model
+ return risks
+ }
+ }
+ // now check if we have end user identity authorized communication links, then it's a risk
+ riskIdentified := false
+ var mostRelevantAsset types.TechnicalAsset
+ impact := types.LowImpact
+ for _, id := range input.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset
+ technicalAsset := input.TechnicalAssets[id]
+ for _, commLink := range technicalAsset.CommunicationLinksSorted() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset
+ if commLink.Authorization == types.EndUserIdentityPropagation {
+ riskIdentified = true
+ targetAsset := input.TechnicalAssets[commLink.TargetId]
+ if impact == types.LowImpact {
+ mostRelevantAsset = targetAsset
+ if targetAsset.HighestConfidentiality(input) >= types.Confidential ||
+ targetAsset.HighestIntegrity(input) >= types.Critical ||
+ targetAsset.HighestAvailability(input) >= types.Critical {
+ impact = types.MediumImpact
+ }
+ }
+ if targetAsset.Confidentiality >= types.Confidential ||
+ targetAsset.Integrity >= types.Critical ||
+ targetAsset.Availability >= types.Critical {
+ impact = types.MediumImpact
+ }
+ // just for referencing the most interesting asset
+ if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
+ mostRelevantAsset = technicalAsset
+ }
+ }
+ }
+ }
+ if riskIdentified {
+ risks = append(risks, r.createRisk(mostRelevantAsset, impact))
+ }
+ return risks
+}
+
+func (r *MissingIdentityStoreRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk {
+ title := "Missing Identity Store in the threat model (referencing asset " + technicalAsset.Title + " as an example)"
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: title,
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/pkg/security/risks/builtin/missing-network-segmentation-rule.go b/pkg/security/risks/builtin/missing-network-segmentation-rule.go
new file mode 100644
index 00000000..659d01bf
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-network-segmentation-rule.go
@@ -0,0 +1,104 @@
+package builtin
+
+import (
+ "sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingNetworkSegmentationRule struct {
+ raaLimit int
+}
+
+func NewMissingNetworkSegmentationRule() *MissingNetworkSegmentationRule {
+ return &MissingNetworkSegmentationRule{raaLimit: 50}
+}
+
+func (*MissingNetworkSegmentationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-network-segmentation",
+ Title: "Missing Network Segmentation",
+ Description: "Highly sensitive assets and/or data stores residing in the same network segment than other " +
+ "lower sensitive assets (like webservers or content management systems etc.) should be better protected " +
+ "by a network segmentation trust-boundary.",
+ Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " +
+ "more valuable targets, as they are not separated by network segmentation.",
+ ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ Action: "Network Segmentation",
+ Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive assets and/or data stores.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Operations,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope technical assets with high sensitivity and RAA values as well as data stores " +
+ "when surrounded by assets (without a network trust-boundary in-between) which are of type " + types.ClientSystem.String() + ", " +
+ types.WebServer.String() + ", " + types.WebApplication.String() + ", " + types.CMS.String() + ", " + types.WebServiceREST.String() + ", " + types.WebServiceSOAP.String() + ", " +
+ types.BuildPipeline.String() + ", " + types.SourcecodeRepository.String() + ", " + types.Monitoring.String() + ", or similar and there is no direct connection between these " +
+ "(hence no requirement to be so close to each other).",
+ RiskAssessment: "Default is " + types.LowSeverity.String() + " risk. The risk is increased to " + types.MediumSeverity.String() + " when the asset missing the " +
+ "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".",
+ FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " +
+ "containing/processing highly sensitive data.",
+ ModelFailurePossibleReason: false,
+ CWE: 1008,
+ }
+}
+
+func (*MissingNetworkSegmentationRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingNetworkSegmentationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order
+ // range over them in sorted (hence re-producible) way:
+ keys := make([]string, 0)
+ for k := range input.TechnicalAssets {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ technicalAsset := input.TechnicalAssets[key]
+ if !technicalAsset.OutOfScope && technicalAsset.Technology != types.ReverseProxy && technicalAsset.Technology != types.WAF && technicalAsset.Technology != types.IDS && technicalAsset.Technology != types.IPS && technicalAsset.Technology != types.ServiceRegistry {
+ if technicalAsset.RAA >= float64(r.raaLimit) && (technicalAsset.Type == types.Datastore || technicalAsset.Confidentiality >= types.Confidential ||
+ technicalAsset.Integrity >= types.Critical || technicalAsset.Availability >= types.Critical) {
+ // now check for any other same-network assets of certain types which have no direct connection
+ for _, sparringAssetCandidateId := range keys { // so inner loop again over all assets
+ if technicalAsset.Id != sparringAssetCandidateId {
+ sparringAssetCandidate := input.TechnicalAssets[sparringAssetCandidateId]
+ if sparringAssetCandidate.Technology.IsLessProtectedType() &&
+ technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) &&
+ !technicalAsset.HasDirectConnection(input, sparringAssetCandidateId) &&
+ !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() {
+ highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical || technicalAsset.Availability == types.MissionCritical
+ risks = append(risks, r.createRisk(technicalAsset, highRisk))
+ break
+ }
+ }
+ }
+ }
+ }
+ }
+ return risks
+}
+
+func (r *MissingNetworkSegmentationRule) createRisk(techAsset types.TechnicalAsset, moreRisky bool) types.Risk {
+ impact := types.LowImpact
+ if moreRisky {
+ impact = types.MediumImpact
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: "Missing Network Segmentation to further encapsulate and protect " + techAsset.Title + " against unrelated " +
+ "lower protected assets in the same network segment, which might be easier to compromise by attackers",
+ MostRelevantTechnicalAssetId: techAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{techAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id
+ return risk
+}
diff --git a/pkg/security/risks/builtin/missing-vault-isolation-rule.go b/pkg/security/risks/builtin/missing-vault-isolation-rule.go
new file mode 100644
index 00000000..7a9d6497
--- /dev/null
+++ b/pkg/security/risks/builtin/missing-vault-isolation-rule.go
@@ -0,0 +1,103 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type MissingVaultIsolationRule struct{}
+
+func NewMissingVaultIsolationRule() *MissingVaultIsolationRule {
+ return &MissingVaultIsolationRule{}
+}
+
+func (*MissingVaultIsolationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "missing-vault-isolation",
+ Title: "Missing Vault Isolation",
+ Description: "Highly sensitive vault assets and their data stores should be isolated from other assets " +
+ "by their own network segmentation trust-boundary (" + types.ExecutionEnvironment.String() + " boundaries do not count as network isolation).",
+ Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " +
+ "highly sensitive vault assets and their data stores, as they are not separated by network segmentation.",
+ ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ Action: "Network Segmentation",
+ Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their data stores.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Operations,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope vault assets " +
+ "when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). " +
+ "This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).",
+ RiskAssessment: "Default is " + types.MediumImpact.String() + " impact. The impact is increased to " + types.HighImpact.String() + " when the asset missing the " +
+ "trust-boundary protection is rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + ".",
+ FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " +
+ "vaults with data of highest sensitivity.",
+ ModelFailurePossibleReason: false,
+ CWE: 1008,
+ }
+}
+
+func (*MissingVaultIsolationRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *MissingVaultIsolationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
+ if !technicalAsset.OutOfScope && technicalAsset.Technology == types.Vault {
+ moreImpact := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical ||
+ technicalAsset.Availability == types.MissionCritical
+ sameExecutionEnv := false
+ createRiskEntry := false
+ // now check for any other same-network assets of non-vault-related types
+ for sparringAssetCandidateId := range input.TechnicalAssets { // so inner loop again over all assets
+ if technicalAsset.Id != sparringAssetCandidateId {
+ sparringAssetCandidate := input.TechnicalAssets[sparringAssetCandidateId]
+ if sparringAssetCandidate.Technology != types.Vault && !isVaultStorage(input, technicalAsset, sparringAssetCandidate) {
+ if technicalAsset.IsSameExecutionEnvironment(input, sparringAssetCandidateId) {
+ createRiskEntry = true
+ sameExecutionEnv = true
+ } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, sparringAssetCandidateId) {
+ createRiskEntry = true
+ }
+ }
+ }
+ }
+ if createRiskEntry {
+ risks = append(risks, r.createRisk(technicalAsset, moreImpact, sameExecutionEnv))
+ }
+ }
+ }
+ return risks
+}
+
+func isVaultStorage(parsedModel *types.ParsedModel, vault types.TechnicalAsset, storage types.TechnicalAsset) bool {
+ return storage.Type == types.Datastore && vault.HasDirectConnection(parsedModel, storage.Id)
+}
+
+func (r *MissingVaultIsolationRule) createRisk(techAsset types.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) types.Risk {
+ impact := types.MediumImpact
+ likelihood := types.Unlikely
+ others := "in the same network segment"
+ if moreImpact {
+ impact = types.HighImpact
+ }
+ if sameExecutionEnv {
+ likelihood = types.Likely
+ others = "in the same execution environment"
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
+ ExploitationLikelihood: likelihood,
+ ExploitationImpact: impact,
+ Title: "Missing Vault Isolation to further encapsulate and protect vault-related asset " + techAsset.Title + " against unrelated " +
+ "lower protected assets " + others + ", which might be easier to compromise by attackers",
+ MostRelevantTechnicalAssetId: techAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{techAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + techAsset.Id
+ return risk
+}
diff --git a/risks/built-in/missing-vault/missing-vault-rule.go b/pkg/security/risks/builtin/missing-vault-rule.go
similarity index 55%
rename from risks/built-in/missing-vault/missing-vault-rule.go
rename to pkg/security/risks/builtin/missing-vault-rule.go
index a046131b..61e8e1c1 100644
--- a/risks/built-in/missing-vault/missing-vault-rule.go
+++ b/pkg/security/risks/builtin/missing-vault-rule.go
@@ -1,11 +1,17 @@
-package missing_vault
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingVaultRule struct{}
+
+func NewMissingVaultRule() *MissingVaultRule {
+ return &MissingVaultRule{}
+}
+
+func (*MissingVaultRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-vault",
Title: "Missing Vault (Secret Storage)",
Description: "In order to avoid the risk of secret leakage via config files (when attacked through vulnerabilities being able to " +
@@ -19,10 +25,10 @@ func Category() model.RiskCategory {
Action: "Vault (Secret Storage)",
Mitigation: "Consider using a Vault (Secret Storage) to securely store and access config secrets (like credentials, private keys, client certificates, etc.).",
Check: "Is a Vault (Secret Storage) in place?",
- Function: model.Architecture,
- STRIDE: model.InformationDisclosure,
+ Function: types.Architecture,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "Models without a Vault (Secret Storage).",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Models where no technical assets have any kind of sensitive config data to protect " +
"can be considered as false positives after individual review.",
ModelFailurePossibleReason: true,
@@ -30,29 +36,29 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MissingVaultRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
+func (r *MissingVaultRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
hasVault := false
- var mostRelevantAsset model.TechnicalAsset
- impact := model.LowImpact
- for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
- techAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if techAsset.Technology == model.Vault {
+ var mostRelevantAsset types.TechnicalAsset
+ impact := types.LowImpact
+ for _, id := range input.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with the highest sensitivity as example asset
+ techAsset := input.TechnicalAssets[id]
+ if techAsset.Technology == types.Vault {
hasVault = true
}
- if techAsset.HighestConfidentiality() >= model.Confidential ||
- techAsset.HighestIntegrity() >= model.Critical ||
- techAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
+ if techAsset.HighestConfidentiality(input) >= types.Confidential ||
+ techAsset.HighestIntegrity(input) >= types.Critical ||
+ techAsset.HighestAvailability(input) >= types.Critical {
+ impact = types.MediumImpact
}
- if techAsset.Confidentiality >= model.Confidential ||
- techAsset.Integrity >= model.Critical ||
- techAsset.Availability >= model.Critical {
- impact = model.MediumImpact
+ if techAsset.Confidentiality >= types.Confidential ||
+ techAsset.Integrity >= types.Critical ||
+ techAsset.Availability >= types.Critical {
+ impact = types.MediumImpact
}
// just for referencing the most interesting asset
if techAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
@@ -60,23 +66,23 @@ func GenerateRisks() []model.Risk {
}
}
if !hasVault {
- risks = append(risks, createRisk(mostRelevantAsset, impact))
+ risks = append(risks, r.createRisk(mostRelevantAsset, impact))
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk {
+func (r *MissingVaultRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact) types.Risk {
title := "Missing Vault (Secret Storage) in the threat model (referencing asset " + technicalAsset.Title + " as an example)"
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/missing-waf/missing-waf-rule.go b/pkg/security/risks/builtin/missing-waf-rule.go
similarity index 60%
rename from risks/built-in/missing-waf/missing-waf-rule.go
rename to pkg/security/risks/builtin/missing-waf-rule.go
index 684cc9af..f2c1132b 100644
--- a/risks/built-in/missing-waf/missing-waf-rule.go
+++ b/pkg/security/risks/builtin/missing-waf-rule.go
@@ -1,11 +1,17 @@
-package missing_waf
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MissingWafRule struct{}
+
+func NewMissingWafRule() *MissingWafRule {
+ return &MissingWafRule{}
+}
+
+func (*MissingWafRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "missing-waf",
Title: "Missing Web Application Firewall (WAF)",
Description: "To have a first line of filtering defense, security architectures with web-services or web-applications should include a WAF in front of them. " +
@@ -18,10 +24,10 @@ func Category() model.RiskCategory {
Mitigation: "Consider placing a Web Application Firewall (WAF) in front of the web-services and/or web-applications. For cloud environments many cloud providers offer " +
"pre-configured WAFs. Even reverse proxies can be enhances by a WAF component via ModSecurity plugins.",
Check: "Is a Web Application Firewall (WAF) in place?",
- Function: model.Operations,
- STRIDE: model.Tampering,
+ Function: types.Operations,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope web-services and/or web-applications accessed across a network trust boundary not having a Web Application Firewall (WAF) in front of them.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Targets only accessible via WAFs or reverse proxies containing a WAF component (like ModSecurity) can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -29,20 +35,20 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MissingWafRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
+func (r *MissingWafRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
if !technicalAsset.OutOfScope &&
(technicalAsset.Technology.IsWebApplication() || technicalAsset.Technology.IsWebService()) {
- for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
- if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() &&
+ for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
+ if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) &&
incomingAccess.Protocol.IsPotentialWebAccessProtocol() &&
- model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Technology != model.WAF {
- risks = append(risks, createRisk(technicalAsset))
+ input.TechnicalAssets[incomingAccess.SourceId].Technology != types.WAF {
+ risks = append(risks, r.createRisk(input, technicalAsset))
break
}
}
@@ -51,25 +57,25 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *MissingWafRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "Missing Web Application Firewall (WAF) risk at " + technicalAsset.Title + ""
- likelihood := model.Unlikely
- impact := model.LowImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.MediumImpact
+ likelihood := types.Unlikely
+ impact := types.LowImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(input) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(input) == types.MissionCritical {
+ impact = types.MediumImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go b/pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go
similarity index 63%
rename from risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go
rename to pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go
index 3c9ab06b..24f07b16 100644
--- a/risks/built-in/mixed-targets-on-shared-runtime/mixed-targets-on-shared-runtime-rule.go
+++ b/pkg/security/risks/builtin/mixed-targets-on-shared-runtime-rule.go
@@ -1,12 +1,19 @@
-package mixed_targets_on_shared_runtime
+package builtin
import (
- "github.com/threagile/threagile/model"
"sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type MixedTargetsOnSharedRuntimeRule struct{}
+
+func NewMixedTargetsOnSharedRuntimeRule() *MixedTargetsOnSharedRuntimeRule {
+ return &MixedTargetsOnSharedRuntimeRule{}
+}
+
+func (*MixedTargetsOnSharedRuntimeRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "mixed-targets-on-shared-runtime",
Title: "Mixed Targets on Shared Runtime",
Description: "Different attacker targets (like frontend and backend/datastore components) should not be running on the same " +
@@ -20,8 +27,8 @@ func Category() model.RiskCategory {
"prevent load- or breach-related problems originating from one more attacker-facing asset impacts also the " +
"other more critical rated backend/datastore assets.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Operations,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "Shared runtime running technical assets of different trust-boundaries is at risk. " +
"Also mixing backend/datastore with frontend components on the same shared runtime is considered a risk.",
RiskAssessment: "The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of " +
@@ -33,31 +40,31 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*MixedTargetsOnSharedRuntimeRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
+func (r *MixedTargetsOnSharedRuntimeRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way:
keys := make([]string, 0)
- for k, _ := range model.ParsedModelRoot.SharedRuntimes {
+ for k := range input.SharedRuntimes {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
- sharedRuntime := model.ParsedModelRoot.SharedRuntimes[key]
+ sharedRuntime := input.SharedRuntimes[key]
currentTrustBoundaryId := ""
hasFrontend, hasBackend := false, false
riskAdded := false
for _, technicalAssetId := range sharedRuntime.TechnicalAssetsRunning {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[technicalAssetId]
- if len(currentTrustBoundaryId) > 0 && currentTrustBoundaryId != technicalAsset.GetTrustBoundaryId() {
- risks = append(risks, createRisk(sharedRuntime))
+ technicalAsset := input.TechnicalAssets[technicalAssetId]
+ if len(currentTrustBoundaryId) > 0 && currentTrustBoundaryId != technicalAsset.GetTrustBoundaryId(input) {
+ risks = append(risks, r.createRisk(input, sharedRuntime))
riskAdded = true
break
}
- currentTrustBoundaryId = technicalAsset.GetTrustBoundaryId()
+ currentTrustBoundaryId = technicalAsset.GetTrustBoundaryId(input)
if technicalAsset.Technology.IsExclusivelyFrontendRelated() {
hasFrontend = true
}
@@ -66,37 +73,37 @@ func GenerateRisks() []model.Risk {
}
}
if !riskAdded && hasFrontend && hasBackend {
- risks = append(risks, createRisk(sharedRuntime))
+ risks = append(risks, r.createRisk(input, sharedRuntime))
}
}
return risks
}
-func createRisk(sharedRuntime model.SharedRuntime) model.Risk {
- impact := model.LowImpact
- if isMoreRisky(sharedRuntime) {
- impact = model.MediumImpact
+func (r *MixedTargetsOnSharedRuntimeRule) createRisk(input *types.ParsedModel, sharedRuntime types.SharedRuntime) types.Risk {
+ impact := types.LowImpact
+ if isMoreRisky(input, sharedRuntime) {
+ impact = types.MediumImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: "Mixed Targets on Shared Runtime named " + sharedRuntime.Title + " might enable attackers moving from one less " +
"valuable target to a more valuable one", // TODO list at least the assets in the text which are running on the shared HW
MostRelevantSharedRuntimeId: sharedRuntime.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: sharedRuntime.TechnicalAssetsRunning,
}
- risk.SyntheticId = risk.Category.Id + "@" + sharedRuntime.Id
+ risk.SyntheticId = risk.CategoryId + "@" + sharedRuntime.Id
return risk
}
-func isMoreRisky(sharedRuntime model.SharedRuntime) bool {
+func isMoreRisky(input *types.ParsedModel, sharedRuntime types.SharedRuntime) bool {
for _, techAssetId := range sharedRuntime.TechnicalAssetsRunning {
- techAsset := model.ParsedModelRoot.TechnicalAssets[techAssetId]
- if techAsset.Confidentiality == model.StrictlyConfidential || techAsset.Integrity == model.MissionCritical ||
- techAsset.Availability == model.MissionCritical {
+ techAsset := input.TechnicalAssets[techAssetId]
+ if techAsset.Confidentiality == types.StrictlyConfidential || techAsset.Integrity == types.MissionCritical ||
+ techAsset.Availability == types.MissionCritical {
return true
}
}
diff --git a/risks/built-in/path-traversal/path-traversal-rule.go b/pkg/security/risks/builtin/path-traversal-rule.go
similarity index 55%
rename from risks/built-in/path-traversal/path-traversal-rule.go
rename to pkg/security/risks/builtin/path-traversal-rule.go
index 1258c039..061b1dfb 100644
--- a/risks/built-in/path-traversal/path-traversal-rule.go
+++ b/pkg/security/risks/builtin/path-traversal-rule.go
@@ -1,15 +1,21 @@
-package path_traversal
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type PathTraversalRule struct{}
+
+func NewPathTraversalRule() *PathTraversalRule {
+ return &PathTraversalRule{}
+}
+
+func (*PathTraversalRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "path-traversal",
Title: "Path-Traversal",
Description: "When a filesystem is accessed Path-Traversal or Local-File-Inclusion (LFI) risks might arise. " +
- "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.",
+ "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
Impact: "If this risk is unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, " +
"business data files, etc.) from the filesystem of affected components.",
ASVS: "V12 - File and Resources Verification Requirements",
@@ -20,8 +26,8 @@ func Category() model.RiskCategory {
"(partly or fully) provided by the caller. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.InformationDisclosure,
+ Function: types.Development,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "Filesystems accessed by in-scope callers.",
RiskAssessment: "The risk rating depends on the sensitivity of the data stored inside the technical asset.",
FalsePositives: "File accesses by filenames not consisting of parts controllable by the caller can be considered " +
@@ -31,51 +37,51 @@ func Category() model.RiskCategory {
}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.Technology != model.FileServer && technicalAsset.Technology != model.LocalFileSystem {
+func (*PathTraversalRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *PathTraversalRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.Technology != types.FileServer && technicalAsset.Technology != types.LocalFileSystem {
continue
}
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
- if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
+ if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
continue
}
- likelihood := model.VeryLikely
- if incomingFlow.Usage == model.DevOps {
- likelihood = model.Likely
+ likelihood := types.VeryLikely
+ if incomingFlow.Usage == types.DevOps {
+ likelihood = types.Likely
}
- risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood))
+ risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood))
}
}
return risks
}
-func SupportedTags() []string {
- return []string{}
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
- caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
+func (r *PathTraversalRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk {
+ caller := input.TechnicalAssets[incomingFlow.SourceId]
title := "Path-Traversal risk at " + caller.Title + " against filesystem " + technicalAsset.Title + "" +
" via " + incomingFlow.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical {
+ impact = types.HighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
}
diff --git a/risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go b/pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go
similarity index 57%
rename from risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go
rename to pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go
index d965ce18..6320ec5a 100644
--- a/risks/built-in/push-instead-of-pull-deployment/push-instead-of-pull-deployment-rule.go
+++ b/pkg/security/risks/builtin/push-instead-of-pull-deployment-rule.go
@@ -1,11 +1,17 @@
-package push_instead_of_pull_deployment
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type PushInsteadPullDeploymentRule struct{}
+
+func NewPushInsteadPullDeploymentRule() *PushInsteadPullDeploymentRule {
+ return &PushInsteadPullDeploymentRule{}
+}
+
+func (*PushInsteadPullDeploymentRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "push-instead-of-pull-deployment",
Title: "Push instead of Pull Deployment",
Description: "When comparing push-based vs. pull-based deployments from a security perspective, pull-based " +
@@ -19,8 +25,8 @@ func Category() model.RiskCategory {
Action: "Build Pipeline Hardening",
Mitigation: "Try to prefer pull-based deployments (like GitOps scenarios offer) over push-based deployments to reduce the attack surface of the production system.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Tampering,
+ Function: types.Architecture,
+ STRIDE: types.Tampering,
DetectionLogic: "Models with build pipeline components accessing in-scope targets of deployment (in a non-readonly way) which " +
"are not build-related components themselves.",
RiskAssessment: "The risk rating depends on the highest sensitivity of the deployment targets running custom-developed parts.",
@@ -31,25 +37,25 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*PushInsteadPullDeploymentRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- impact := model.LowImpact
- for _, buildPipeline := range model.ParsedModelRoot.TechnicalAssets {
- if buildPipeline.Technology == model.BuildPipeline {
+func (r *PushInsteadPullDeploymentRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ impact := types.LowImpact
+ for _, buildPipeline := range input.TechnicalAssets {
+ if buildPipeline.Technology == types.BuildPipeline {
for _, deploymentLink := range buildPipeline.CommunicationLinks {
- targetAsset := model.ParsedModelRoot.TechnicalAssets[deploymentLink.TargetId]
- if !deploymentLink.Readonly && deploymentLink.Usage == model.DevOps &&
- !targetAsset.OutOfScope && !targetAsset.Technology.IsDevelopmentRelevant() && targetAsset.Usage == model.Business {
- if targetAsset.HighestConfidentiality() >= model.Confidential ||
- targetAsset.HighestIntegrity() >= model.Critical ||
- targetAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
+ targetAsset := input.TechnicalAssets[deploymentLink.TargetId]
+ if !deploymentLink.Readonly && deploymentLink.Usage == types.DevOps &&
+ !targetAsset.OutOfScope && !targetAsset.Technology.IsDevelopmentRelevant() && targetAsset.Usage == types.Business {
+ if targetAsset.HighestConfidentiality(input) >= types.Confidential ||
+ targetAsset.HighestIntegrity(input) >= types.Critical ||
+ targetAsset.HighestAvailability(input) >= types.Critical {
+ impact = types.MediumImpact
}
- risks = append(risks, createRisk(buildPipeline, targetAsset, deploymentLink, impact))
+ risks = append(risks, r.createRisk(buildPipeline, targetAsset, deploymentLink, impact))
}
}
}
@@ -57,19 +63,19 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(buildPipeline model.TechnicalAsset, deploymentTarget model.TechnicalAsset, deploymentCommLink model.CommunicationLink, impact model.RiskExploitationImpact) model.Risk {
+func (r *PushInsteadPullDeploymentRule) createRisk(buildPipeline types.TechnicalAsset, deploymentTarget types.TechnicalAsset, deploymentCommLink types.CommunicationLink, impact types.RiskExploitationImpact) types.Risk {
title := "Push instead of Pull Deployment at " + deploymentTarget.Title + " via build pipeline asset " + buildPipeline.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: deploymentTarget.Id,
MostRelevantCommunicationLinkId: deploymentCommLink.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{deploymentTarget.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + buildPipeline.Id
+ risk.SyntheticId = risk.CategoryId + "@" + buildPipeline.Id
return risk
}
diff --git a/risks/built-in/search-query-injection/search-query-injection-rule.go b/pkg/security/risks/builtin/search-query-injection-rule.go
similarity index 55%
rename from risks/built-in/search-query-injection/search-query-injection-rule.go
rename to pkg/security/risks/builtin/search-query-injection-rule.go
index 936ab6e6..749df40e 100644
--- a/risks/built-in/search-query-injection/search-query-injection-rule.go
+++ b/pkg/security/risks/builtin/search-query-injection-rule.go
@@ -1,11 +1,17 @@
-package search_query_injection
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type SearchQueryInjectionRule struct{}
+
+func NewSearchQueryInjectionRule() *SearchQueryInjectionRule {
+ return &SearchQueryInjectionRule{}
+}
+
+func (*SearchQueryInjectionRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "search-query-injection",
Title: "Search-Query Injection",
Description: "When a search engine server is accessed Search-Query Injection risks might arise." +
@@ -21,10 +27,10 @@ func Category() model.RiskCategory {
"query unfiltered to the caller. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Tampering,
+ Function: types.Development,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope clients accessing search engine servers via typical search access protocols.",
- RiskAssessment: "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed or stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed.",
FalsePositives: "Server engine queries by search values not consisting of parts controllable by the caller can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -32,23 +38,27 @@ func Category() model.RiskCategory {
}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.Technology == model.SearchEngine || technicalAsset.Technology == model.SearchIndex {
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+func (*SearchQueryInjectionRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *SearchQueryInjectionRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.Technology == types.SearchEngine || technicalAsset.Technology == types.SearchIndex {
+ incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
- if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
+ if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
continue
}
- if incomingFlow.Protocol == model.HTTP || incomingFlow.Protocol == model.HTTPS ||
- incomingFlow.Protocol == model.BINARY || incomingFlow.Protocol == model.BINARY_encrypted {
- likelihood := model.VeryLikely
- if incomingFlow.Usage == model.DevOps {
- likelihood = model.Likely
+ if incomingFlow.Protocol == types.HTTP || incomingFlow.Protocol == types.HTTPS ||
+ incomingFlow.Protocol == types.BINARY || incomingFlow.Protocol == types.BinaryEncrypted {
+ likelihood := types.VeryLikely
+ if incomingFlow.Usage == types.DevOps {
+ likelihood = types.Likely
}
- risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood))
+ risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow, likelihood))
}
}
}
@@ -56,31 +66,27 @@ func GenerateRisks() []model.Risk {
return risks
}
-func SupportedTags() []string {
- return []string{}
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
- caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
+func (r *SearchQueryInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink, likelihood types.RiskExploitationLikelihood) types.Risk {
+ caller := input.TechnicalAssets[incomingFlow.SourceId]
title := "Search Query Injection risk at " + caller.Title + " against search engine server " + technicalAsset.Title + "" +
" via " + incomingFlow.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.HighImpact
- } else if technicalAsset.HighestConfidentiality() <= model.Internal && technicalAsset.HighestIntegrity() == model.Operational {
- impact = model.LowImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical {
+ impact = types.HighImpact
+ } else if technicalAsset.HighestConfidentiality(input) <= types.Internal && technicalAsset.HighestIntegrity(input) == types.Operational {
+ impact = types.LowImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
}
diff --git a/risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go b/pkg/security/risks/builtin/server-side-request-forgery-rule.go
similarity index 62%
rename from risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go
rename to pkg/security/risks/builtin/server-side-request-forgery-rule.go
index 901c5fcf..45c3ce27 100644
--- a/risks/built-in/server-side-request-forgery/server-side-request-forgery-rule.go
+++ b/pkg/security/risks/builtin/server-side-request-forgery-rule.go
@@ -1,11 +1,17 @@
-package server_side_request_forgery
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type ServerSideRequestForgeryRule struct{}
+
+func NewServerSideRequestForgeryRule() *ServerSideRequestForgeryRule {
+ return &ServerSideRequestForgeryRule{}
+}
+
+func (*ServerSideRequestForgeryRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "server-side-request-forgery",
Title: "Server-Side Request Forgery (SSRF)",
Description: "When a server system (i.e. not a client) is accessing other server systems via typical web protocols " +
@@ -18,8 +24,8 @@ func Category() model.RiskCategory {
"controllable values. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.InformationDisclosure,
+ Function: types.Development,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "In-scope non-client systems accessing (using outgoing communication links) targets with either HTTP or HTTPS protocol.",
RiskAssessment: "The risk rating (low or medium) depends on the sensitivity of the data assets receivable via web protocols from " +
"targets within the same network trust-boundary as well on the sensitivity of the data assets receivable via web protocols from the target asset itself. " +
@@ -31,73 +37,73 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*ServerSideRequestForgeryRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.OutOfScope || technicalAsset.Technology.IsClient() || technicalAsset.Technology == model.LoadBalancer {
+func (r *ServerSideRequestForgeryRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if technicalAsset.OutOfScope || technicalAsset.Technology.IsClient() || technicalAsset.Technology == types.LoadBalancer {
continue
}
for _, outgoingFlow := range technicalAsset.CommunicationLinks {
if outgoingFlow.Protocol.IsPotentialWebAccessProtocol() {
- risks = append(risks, createRisk(technicalAsset, outgoingFlow))
+ risks = append(risks, r.createRisk(input, technicalAsset, outgoingFlow))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, outgoingFlow model.CommunicationLink) model.Risk {
- target := model.ParsedModelRoot.TechnicalAssets[outgoingFlow.TargetId]
+func (r *ServerSideRequestForgeryRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, outgoingFlow types.CommunicationLink) types.Risk {
+ target := input.TechnicalAssets[outgoingFlow.TargetId]
title := "Server-Side Request Forgery (SSRF) risk at " + technicalAsset.Title + " server-side web-requesting " +
"the target " + target.Title + " via " + outgoingFlow.Title + ""
- impact := model.LowImpact
+ impact := types.LowImpact
// check by the target itself (can be in another trust-boundary)
- if target.HighestConfidentiality() == model.StrictlyConfidential {
- impact = model.MediumImpact
+ if target.HighestConfidentiality(input) == types.StrictlyConfidential {
+ impact = types.MediumImpact
}
// check all potential attack targets within the same trust boundary (accessible via web protocols)
uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{})
uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true
- for _, potentialTargetAsset := range model.ParsedModelRoot.TechnicalAssets {
- if technicalAsset.IsSameTrustBoundaryNetworkOnly(potentialTargetAsset.Id) {
- for _, commLinkIncoming := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[potentialTargetAsset.Id] {
+ for _, potentialTargetAsset := range input.TechnicalAssets {
+ if technicalAsset.IsSameTrustBoundaryNetworkOnly(input, potentialTargetAsset.Id) {
+ for _, commLinkIncoming := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[potentialTargetAsset.Id] {
if commLinkIncoming.Protocol.IsPotentialWebAccessProtocol() {
uniqueDataBreachTechnicalAssetIDs[potentialTargetAsset.Id] = true
- if potentialTargetAsset.HighestConfidentiality() == model.StrictlyConfidential {
- impact = model.MediumImpact
+ if potentialTargetAsset.HighestConfidentiality(input) == types.StrictlyConfidential {
+ impact = types.MediumImpact
}
}
}
}
}
// adjust for cloud-based special risks
- if impact == model.LowImpact && model.ParsedModelRoot.TrustBoundaries[technicalAsset.GetTrustBoundaryId()].Type.IsWithinCloud() {
- impact = model.MediumImpact
+ if impact == types.LowImpact && input.TrustBoundaries[technicalAsset.GetTrustBoundaryId(input)].Type.IsWithinCloud() {
+ impact = types.MediumImpact
}
dataBreachTechnicalAssetIDs := make([]string, 0)
- for key, _ := range uniqueDataBreachTechnicalAssetIDs {
+ for key := range uniqueDataBreachTechnicalAssetIDs {
dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key)
}
- likelihood := model.Likely
- if outgoingFlow.Usage == model.DevOps {
- likelihood = model.Unlikely
+ likelihood := types.Likely
+ if outgoingFlow.Usage == types.DevOps {
+ likelihood = types.Unlikely
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: outgoingFlow.Id,
- DataBreachProbability: model.Possible,
+ DataBreachProbability: types.Possible,
DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs,
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + target.Id + "@" + outgoingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + target.Id + "@" + outgoingFlow.Id
return risk
}
diff --git a/pkg/security/risks/builtin/service-registry-poisoning-rule.go b/pkg/security/risks/builtin/service-registry-poisoning-rule.go
new file mode 100644
index 00000000..94be965c
--- /dev/null
+++ b/pkg/security/risks/builtin/service-registry-poisoning-rule.go
@@ -0,0 +1,79 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type ServiceRegistryPoisoningRule struct{}
+
+func NewServiceRegistryPoisoningRule() *ServiceRegistryPoisoningRule {
+ return &ServiceRegistryPoisoningRule{}
+}
+
+func (*ServiceRegistryPoisoningRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "service-registry-poisoning",
+ Title: "Service Registry Poisoning",
+ Description: "When a service registry is used for discovery of trusted service endpoints, Service Registry Poisoning risks might arise.",
+ Impact: "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or " +
+ "malicious lookup and config data leading to breach of sensitive data.",
+ ASVS: "V10 - Malicious Code Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
+ Action: "Service Registry Integrity Check",
+ Mitigation: "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.Spoofing,
+ DetectionLogic: "In-scope service registries.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical assets accessing the service registry " +
+ "as well as the data assets processed.",
+ FalsePositives: "Service registries not used for service discovery " +
+ "can be considered as false positives after individual review.",
+ ModelFailurePossibleReason: false,
+ CWE: 693,
+ }
+}
+
+func (*ServiceRegistryPoisoningRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *ServiceRegistryPoisoningRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && technicalAsset.Technology == types.ServiceRegistry {
+ incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ risks = append(risks, r.createRisk(input, technicalAsset, incomingFlows))
+ }
+ }
+ return risks
+}
+
+func (r *ServiceRegistryPoisoningRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlows []types.CommunicationLink) types.Risk {
+ title := "Service Registry Poisoning risk at " + technicalAsset.Title + ""
+ impact := types.LowImpact
+
+ for _, incomingFlow := range incomingFlows {
+ caller := input.TechnicalAssets[incomingFlow.SourceId]
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical || technicalAsset.HighestAvailability(input) == types.MissionCritical ||
+ caller.HighestConfidentiality(input) == types.StrictlyConfidential || caller.HighestIntegrity(input) == types.MissionCritical || caller.HighestAvailability(input) == types.MissionCritical ||
+ incomingFlow.HighestConfidentiality(input) == types.StrictlyConfidential || incomingFlow.HighestIntegrity(input) == types.MissionCritical || incomingFlow.HighestAvailability(input) == types.MissionCritical {
+ impact = types.MediumImpact
+ break
+ }
+ }
+
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: title,
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: find all service-lookup-using tech assets, which then might use spoofed lookups?
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go b/pkg/security/risks/builtin/sql-nosql-injection-rule.go
similarity index 56%
rename from risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go
rename to pkg/security/risks/builtin/sql-nosql-injection-rule.go
index 331f54e8..f2684333 100644
--- a/risks/built-in/sql-nosql-injection/sql-nosql-injection-rule.go
+++ b/pkg/security/risks/builtin/sql-nosql-injection-rule.go
@@ -1,15 +1,21 @@
-package sql_nosql_injection
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type SqlNoSqlInjectionRule struct{}
+
+func NewSqlNoSqlInjectionRule() *SqlNoSqlInjectionRule {
+ return &SqlNoSqlInjectionRule{}
+}
+
+func (*SqlNoSqlInjectionRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "sql-nosql-injection",
Title: "SQL/NoSQL-Injection",
Description: "When a database is accessed via database access protocols SQL/NoSQL-Injection risks might arise. " +
- "The risk rating depends on the sensitivity technical asset itself and of the data assets processed or stored.",
+ "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
Impact: "If this risk is unmitigated, attackers might be able to modify SQL/NoSQL queries to steal and modify data and eventually further escalate towards a deeper system penetration via code executions.",
ASVS: "V5 - Validation, Sanitization and Encoding Verification Requirements",
CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html",
@@ -17,8 +23,8 @@ func Category() model.RiskCategory {
Mitigation: "Try to use parameter binding to be safe from injection vulnerabilities. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.Tampering,
+ Function: types.Development,
+ STRIDE: types.Tampering,
DetectionLogic: "Database accessed via typical database access protocols by in-scope clients.",
RiskAssessment: "The risk rating depends on the sensitivity of the data stored inside the database.",
FalsePositives: "Database accesses by queries not consisting of parts controllable by the caller can be considered " +
@@ -28,51 +34,51 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*SqlNoSqlInjectionRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+func (r *SqlNoSqlInjectionRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ incomingFlows := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
- if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
+ if input.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
continue
}
- if incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(true) && (technicalAsset.Technology == model.Database || technicalAsset.Technology == model.IdentityStoreDatabase) ||
+ if incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(true) && (technicalAsset.Technology == types.Database || technicalAsset.Technology == types.IdentityStoreDatabase) ||
(incomingFlow.Protocol.IsPotentialDatabaseAccessProtocol(false)) {
- risks = append(risks, createRisk(technicalAsset, incomingFlow))
+ risks = append(risks, r.createRisk(input, technicalAsset, incomingFlow))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink) model.Risk {
- caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
+func (r *SqlNoSqlInjectionRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, incomingFlow types.CommunicationLink) types.Risk {
+ caller := input.TechnicalAssets[incomingFlow.SourceId]
title := "SQL/NoSQL-Injection risk at " + caller.Title + " against database " + technicalAsset.Title + "" +
" via " + incomingFlow.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential || technicalAsset.HighestIntegrity(input) == types.MissionCritical {
+ impact = types.HighImpact
}
- likelihood := model.VeryLikely
- if incomingFlow.Usage == model.DevOps {
- likelihood = model.Likely
+ likelihood := types.VeryLikely
+ if incomingFlow.Usage == types.DevOps {
+ likelihood = types.Likely
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
+ risk.SyntheticId = risk.CategoryId + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
}
diff --git a/risks/built-in/unchecked-deployment/unchecked-deployment-rule.go b/pkg/security/risks/builtin/unchecked-deployment-rule.go
similarity index 64%
rename from risks/built-in/unchecked-deployment/unchecked-deployment-rule.go
rename to pkg/security/risks/builtin/unchecked-deployment-rule.go
index 34baf45f..1f864ab0 100644
--- a/risks/built-in/unchecked-deployment/unchecked-deployment-rule.go
+++ b/pkg/security/risks/builtin/unchecked-deployment-rule.go
@@ -1,11 +1,17 @@
-package unchecked_deployment
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UncheckedDeploymentRule struct{}
+
+func NewUncheckedDeploymentRule() *UncheckedDeploymentRule {
+ return &UncheckedDeploymentRule{}
+}
+
+func (*UncheckedDeploymentRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unchecked-deployment",
Title: "Unchecked Deployment",
Description: "For each build-pipeline component Unchecked Deployment risks might arise when the build-pipeline " +
@@ -20,8 +26,8 @@ func Category() model.RiskCategory {
Mitigation: "Apply DevSecOps best-practices and use scanning tools to identify vulnerabilities in source- or byte-code," +
"dependencies, container layers, and optionally also via dynamic scans against running test systems.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Tampering,
+ Function: types.Architecture,
+ STRIDE: types.Tampering,
DetectionLogic: "All development-relevant technical assets.",
RiskAssessment: "The risk rating depends on the highest rating of the technical assets and data assets processed by deployment-receiving targets.",
FalsePositives: "When the build-pipeline does not build any software components it can be considered a false positive " +
@@ -31,39 +37,39 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*UncheckedDeploymentRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
+func (r *UncheckedDeploymentRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
if technicalAsset.Technology.IsDevelopmentRelevant() {
- risks = append(risks, createRisk(technicalAsset))
+ risks = append(risks, r.createRisk(input, technicalAsset))
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *UncheckedDeploymentRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "Unchecked Deployment risk at " + technicalAsset.Title + ""
// impact is depending on highest rating
- impact := model.LowImpact
+ impact := types.LowImpact
// data breach at all deployment targets
uniqueDataBreachTechnicalAssetIDs := make(map[string]interface{})
uniqueDataBreachTechnicalAssetIDs[technicalAsset.Id] = true
for _, codeDeploymentTargetCommLink := range technicalAsset.CommunicationLinks {
- if codeDeploymentTargetCommLink.Usage == model.DevOps {
+ if codeDeploymentTargetCommLink.Usage == types.DevOps {
for _, dataAssetID := range codeDeploymentTargetCommLink.DataAssetsSent {
// it appears to be code when elevated integrity rating of sent data asset
- if model.ParsedModelRoot.DataAssets[dataAssetID].Integrity >= model.Important {
+ if input.DataAssets[dataAssetID].Integrity >= types.Important {
// here we've got a deployment target which has its data assets at risk via deployment of backdoored code
uniqueDataBreachTechnicalAssetIDs[codeDeploymentTargetCommLink.TargetId] = true
- targetTechAsset := model.ParsedModelRoot.TechnicalAssets[codeDeploymentTargetCommLink.TargetId]
- if targetTechAsset.HighestConfidentiality() >= model.Confidential ||
- targetTechAsset.HighestIntegrity() >= model.Critical ||
- targetTechAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
+ targetTechAsset := input.TechnicalAssets[codeDeploymentTargetCommLink.TargetId]
+ if targetTechAsset.HighestConfidentiality(input) >= types.Confidential ||
+ targetTechAsset.HighestIntegrity(input) >= types.Critical ||
+ targetTechAsset.HighestAvailability(input) >= types.Critical {
+ impact = types.MediumImpact
}
break
}
@@ -71,20 +77,20 @@ func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
}
}
dataBreachTechnicalAssetIDs := make([]string, 0)
- for key, _ := range uniqueDataBreachTechnicalAssetIDs {
+ for key := range uniqueDataBreachTechnicalAssetIDs {
dataBreachTechnicalAssetIDs = append(dataBreachTechnicalAssetIDs, key)
}
// create risk
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Possible,
+ DataBreachProbability: types.Possible,
DataBreachTechnicalAssetIDs: dataBreachTechnicalAssetIDs,
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/pkg/security/risks/builtin/unencrypted-asset-rule.go b/pkg/security/risks/builtin/unencrypted-asset-rule.go
new file mode 100644
index 00000000..3b5d64dc
--- /dev/null
+++ b/pkg/security/risks/builtin/unencrypted-asset-rule.go
@@ -0,0 +1,98 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type UnencryptedAssetRule struct{}
+
+func NewUnencryptedAssetRule() *UnencryptedAssetRule {
+ return &UnencryptedAssetRule{}
+}
+
+func (*UnencryptedAssetRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "unencrypted-asset",
+ Title: "Unencrypted Technical Assets",
+ Description: "Due to the confidentiality rating of the technical asset itself and/or the processed data assets " +
+ "this technical asset must be encrypted. The risk rating depends on the sensitivity technical asset itself and of the data assets stored.",
+ Impact: "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.",
+ ASVS: "V6 - Stored Cryptography Verification Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html",
+ Action: "Encryption of Technical Asset",
+ Mitigation: "Apply encryption to the technical asset.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Operations,
+ STRIDE: types.InformationDisclosure,
+ DetectionLogic: "In-scope unencrypted technical assets (excluding " + types.ReverseProxy.String() +
+ ", " + types.LoadBalancer.String() + ", " + types.WAF.String() + ", " + types.IDS.String() +
+ ", " + types.IPS.String() + " and embedded components like " + types.Library.String() + ") " +
+ "storing data assets rated at least as " + types.Confidential.String() + " or " + types.Critical.String() + ". " +
+ "For technical assets storing data assets rated as " + types.StrictlyConfidential.String() + " or " + types.MissionCritical.String() + " the " +
+ "encryption must be of type " + types.DataWithEndUserIndividualKey.String() + ".",
+ // NOTE: the risk assesment does not only consider the CIs of the *stored* data-assets
+ RiskAssessment: "Depending on the confidentiality rating of the stored data-assets either medium or high risk.",
+ FalsePositives: "When all sensitive data stored within the asset is already fully encrypted on document or data level.",
+ ModelFailurePossibleReason: false,
+ CWE: 311,
+ }
+}
+
+func (*UnencryptedAssetRule) SupportedTags() []string {
+ return []string{}
+}
+
+// check for technical assets that should be encrypted due to their confidentiality
+
+func (r *UnencryptedAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && !isEncryptionWaiver(technicalAsset) &&
+ (technicalAsset.HighestConfidentiality(input) >= types.Confidential ||
+ technicalAsset.HighestIntegrity(input) >= types.Critical) {
+ verySensitive := technicalAsset.HighestConfidentiality(input) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(input) == types.MissionCritical
+ requiresEndUserKey := verySensitive && technicalAsset.Technology.IsUsuallyStoringEndUserData()
+ if technicalAsset.Encryption == types.NoneEncryption {
+ impact := types.MediumImpact
+ if verySensitive {
+ impact = types.HighImpact
+ }
+ risks = append(risks, r.createRisk(technicalAsset, impact, requiresEndUserKey))
+ } else if requiresEndUserKey &&
+ (technicalAsset.Encryption == types.Transparent || technicalAsset.Encryption == types.DataWithSymmetricSharedKey || technicalAsset.Encryption == types.DataWithAsymmetricSharedKey) {
+ risks = append(risks, r.createRisk(technicalAsset, types.MediumImpact, requiresEndUserKey))
+ }
+ }
+ }
+ return risks
+}
+
+// Simple routing assets like 'Reverse Proxy' or 'Load Balancer' usually don't have their own storage and thus have no
+// encryption requirement for the asset itself (though for the communication, but that's a different rule)
+
+func isEncryptionWaiver(asset types.TechnicalAsset) bool {
+ return asset.Technology == types.ReverseProxy || asset.Technology == types.LoadBalancer ||
+ asset.Technology == types.WAF || asset.Technology == types.IDS || asset.Technology == types.IPS ||
+ asset.Technology.IsEmbeddedComponent()
+}
+
+func (r *UnencryptedAssetRule) createRisk(technicalAsset types.TechnicalAsset, impact types.RiskExploitationImpact, requiresEndUserKey bool) types.Risk {
+ title := "Unencrypted Technical Asset named " + technicalAsset.Title + ""
+ if requiresEndUserKey {
+ title += " missing end user individual encryption with " + types.DataWithEndUserIndividualKey.String()
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: impact,
+ Title: title,
+ MostRelevantTechnicalAssetId: technicalAsset.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
+ return risk
+}
diff --git a/risks/built-in/unencrypted-communication/unencrypted-communication-rule.go b/pkg/security/risks/builtin/unencrypted-communication-rule.go
similarity index 52%
rename from risks/built-in/unencrypted-communication/unencrypted-communication-rule.go
rename to pkg/security/risks/builtin/unencrypted-communication-rule.go
index 0718540b..aee4181c 100644
--- a/risks/built-in/unencrypted-communication/unencrypted-communication-rule.go
+++ b/pkg/security/risks/builtin/unencrypted-communication-rule.go
@@ -1,11 +1,17 @@
-package unencrypted_communication
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UnencryptedCommunicationRule struct{}
+
+func NewUnencryptedCommunicationRule() *UnencryptedCommunicationRule {
+ return &UnencryptedCommunicationRule{}
+}
+
+func (*UnencryptedCommunicationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unencrypted-communication",
Title: "Unencrypted Communication",
Description: "Due to the confidentiality and/or integrity rating of the data assets transferred over the " +
@@ -16,9 +22,9 @@ func Category() model.RiskCategory {
Action: "Encryption of Communication Links",
Mitigation: "Apply transport layer encryption to the communication link.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.InformationDisclosure,
- DetectionLogic: "Unencrypted technical communication links of in-scope technical assets (excluding " + model.Monitoring.String() + " traffic as well as " + model.LocalFileAccess.String() + " and " + model.InProcessLibraryCall.String() + ") " +
+ Function: types.Operations,
+ STRIDE: types.InformationDisclosure,
+ DetectionLogic: "Unencrypted technical communication links of in-scope technical assets (excluding " + types.Monitoring.String() + " traffic as well as " + types.LocalFileAccess.String() + " and " + types.InProcessLibraryCall.String() + ") " +
"transferring sensitive data.", // TODO more detailed text required here
RiskAssessment: "Depending on the confidentiality rating of the transferred data-assets either medium or high risk.",
FalsePositives: "When all sensitive data sent over the communication link is already fully encrypted on document or data level. " +
@@ -28,43 +34,44 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*UnencryptedCommunicationRule) SupportedTags() []string {
return []string{}
}
// check for communication links that should be encrypted due to their confidentiality and/or integrity
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
+
+func (r *UnencryptedCommunicationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, technicalAsset := range input.TechnicalAssets {
for _, dataFlow := range technicalAsset.CommunicationLinks {
- transferringAuthData := dataFlow.Authentication != model.NoneAuthentication
- sourceAsset := model.ParsedModelRoot.TechnicalAssets[dataFlow.SourceId]
- targetAsset := model.ParsedModelRoot.TechnicalAssets[dataFlow.TargetId]
+ transferringAuthData := dataFlow.Authentication != types.NoneAuthentication
+ sourceAsset := input.TechnicalAssets[dataFlow.SourceId]
+ targetAsset := input.TechnicalAssets[dataFlow.TargetId]
if !technicalAsset.OutOfScope || !sourceAsset.OutOfScope {
if !dataFlow.Protocol.IsEncrypted() && !dataFlow.Protocol.IsProcessLocal() &&
- !sourceAsset.Technology.IsUnprotectedCommsTolerated() &&
- !targetAsset.Technology.IsUnprotectedCommsTolerated() {
+ !sourceAsset.Technology.IsUnprotectedCommunicationsTolerated() &&
+ !targetAsset.Technology.IsUnprotectedCommunicationsTolerated() {
addedOne := false
for _, sentDataAsset := range dataFlow.DataAssetsSent {
- dataAsset := model.ParsedModelRoot.DataAssets[sentDataAsset]
+ dataAsset := input.DataAssets[sentDataAsset]
if isHighSensitivity(dataAsset) || transferringAuthData {
- risks = append(risks, createRisk(technicalAsset, dataFlow, true, transferringAuthData))
+ risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, true, transferringAuthData))
addedOne = true
break
} else if !dataFlow.VPN && isMediumSensitivity(dataAsset) {
- risks = append(risks, createRisk(technicalAsset, dataFlow, false, transferringAuthData))
+ risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, false, transferringAuthData))
addedOne = true
break
}
}
if !addedOne {
for _, receivedDataAsset := range dataFlow.DataAssetsReceived {
- dataAsset := model.ParsedModelRoot.DataAssets[receivedDataAsset]
+ dataAsset := input.DataAssets[receivedDataAsset]
if isHighSensitivity(dataAsset) || transferringAuthData {
- risks = append(risks, createRisk(technicalAsset, dataFlow, true, transferringAuthData))
+ risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, true, transferringAuthData))
break
} else if !dataFlow.VPN && isMediumSensitivity(dataAsset) {
- risks = append(risks, createRisk(technicalAsset, dataFlow, false, transferringAuthData))
+ risks = append(risks, r.createRisk(input, technicalAsset, dataFlow, false, transferringAuthData))
break
}
}
@@ -76,43 +83,43 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, dataFlow model.CommunicationLink, highRisk bool, transferringAuthData bool) model.Risk {
- impact := model.MediumImpact
+func (r *UnencryptedCommunicationRule) createRisk(input *types.ParsedModel, technicalAsset types.TechnicalAsset, dataFlow types.CommunicationLink, highRisk bool, transferringAuthData bool) types.Risk {
+ impact := types.MediumImpact
if highRisk {
- impact = model.HighImpact
+ impact = types.HighImpact
}
- target := model.ParsedModelRoot.TechnicalAssets[dataFlow.TargetId]
+ target := input.TechnicalAssets[dataFlow.TargetId]
title := "Unencrypted Communication named " + dataFlow.Title + " between " + technicalAsset.Title + " and " + target.Title + ""
if transferringAuthData {
title += " transferring authentication data (like credentials, token, session-id, etc.)"
}
if dataFlow.VPN {
title += " (even VPN-protected connections need to encrypt their data in-transit when confidentiality is " +
- "rated " + model.StrictlyConfidential.String() + " or integrity is rated " + model.MissionCritical.String() + ")"
+ "rated " + types.StrictlyConfidential.String() + " or integrity is rated " + types.MissionCritical.String() + ")"
}
- likelihood := model.Unlikely
- if dataFlow.IsAcrossTrustBoundaryNetworkOnly() {
- likelihood = model.Likely
+ likelihood := types.Unlikely
+ if dataFlow.IsAcrossTrustBoundaryNetworkOnly(input) {
+ likelihood = types.Likely
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: dataFlow.Id,
- DataBreachProbability: model.Possible,
+ DataBreachProbability: types.Possible,
DataBreachTechnicalAssetIDs: []string{target.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + dataFlow.Id + "@" + technicalAsset.Id + "@" + target.Id
+ risk.SyntheticId = risk.CategoryId + "@" + dataFlow.Id + "@" + technicalAsset.Id + "@" + target.Id
return risk
}
-func isHighSensitivity(dataAsset model.DataAsset) bool {
- return dataAsset.Confidentiality == model.StrictlyConfidential || dataAsset.Integrity == model.MissionCritical
+func isHighSensitivity(dataAsset types.DataAsset) bool {
+ return dataAsset.Confidentiality == types.StrictlyConfidential || dataAsset.Integrity == types.MissionCritical
}
-func isMediumSensitivity(dataAsset model.DataAsset) bool {
- return dataAsset.Confidentiality == model.Confidential || dataAsset.Integrity == model.Critical
+func isMediumSensitivity(dataAsset types.DataAsset) bool {
+ return dataAsset.Confidentiality == types.Confidential || dataAsset.Integrity == types.Critical
}
diff --git a/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go b/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go
new file mode 100644
index 00000000..9e1ec39d
--- /dev/null
+++ b/pkg/security/risks/builtin/unguarded-access-from-internet-rule.go
@@ -0,0 +1,112 @@
+package builtin
+
+import (
+ "sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type UnguardedAccessFromInternetRule struct{}
+
+func NewUnguardedAccessFromInternetRule() *UnguardedAccessFromInternetRule {
+ return &UnguardedAccessFromInternetRule{}
+}
+
+func (*UnguardedAccessFromInternetRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "unguarded-access-from-internet",
+ Title: "Unguarded Access From Internet",
+ Description: "Internet-exposed assets must be guarded by a protecting service, application, " +
+ "or reverse-proxy.",
+ Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive systems without any hardening components in-between " +
+ "due to them being directly exposed on the internet.",
+ ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ Action: "Encapsulation of Technical Asset",
+ Mitigation: "Encapsulate the asset behind a guarding service, application, or reverse-proxy. " +
+ "For admin maintenance a bastion-host should be used as a jump-server. " +
+ "For file transfer a store-and-forward-host should be used as an indirect file exchange platform.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "In-scope technical assets (excluding " + types.LoadBalancer.String() + ") with confidentiality rating " +
+ "of " + types.Confidential.String() + " (or higher) or with integrity rating of " + types.Critical.String() + " (or higher) when " +
+ "accessed directly from the internet. All " +
+ types.WebServer.String() + ", " + types.WebApplication.String() + ", " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", and " + types.Gateway.String() + " assets are exempted from this risk when " +
+ "they do not consist of custom developed code and " +
+ "the data-flow only consists of HTTP or FTP protocols. Access from " + types.Monitoring.String() + " systems " +
+ "as well as VPN-protected connections are exempted.",
+ RiskAssessment: "The matching technical assets are at " + types.LowSeverity.String() + " risk. When either the " +
+ "confidentiality rating is " + types.StrictlyConfidential.String() + " or the integrity rating " +
+ "is " + types.MissionCritical.String() + ", the risk-rating is considered " + types.MediumSeverity.String() + ". " +
+ "For assets with RAA values higher than 40 % the risk-rating increases.",
+ FalsePositives: "When other means of filtering client requests are applied equivalent of " + types.ReverseProxy.String() + ", " + types.WAF.String() + ", or " + types.Gateway.String() + " components.",
+ ModelFailurePossibleReason: false,
+ CWE: 501,
+ }
+}
+
+func (*UnguardedAccessFromInternetRule) SupportedTags() []string {
+ return []string{}
+}
+
+func (r *UnguardedAccessFromInternetRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope {
+ commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ sort.Sort(types.ByTechnicalCommunicationLinkIdSort(commLinks))
+ for _, incomingAccess := range commLinks {
+ if technicalAsset.Technology != types.LoadBalancer {
+ if !technicalAsset.CustomDevelopedParts {
+ if (technicalAsset.Technology == types.WebServer || technicalAsset.Technology == types.WebApplication || technicalAsset.Technology == types.ReverseProxy || technicalAsset.Technology == types.WAF || technicalAsset.Technology == types.Gateway) &&
+ (incomingAccess.Protocol == types.HTTP || incomingAccess.Protocol == types.HTTPS) {
+ continue
+ }
+ if technicalAsset.Technology == types.Gateway &&
+ (incomingAccess.Protocol == types.FTP || incomingAccess.Protocol == types.FTPS || incomingAccess.Protocol == types.SFTP) {
+ continue
+ }
+ }
+ if input.TechnicalAssets[incomingAccess.SourceId].Technology == types.Monitoring ||
+ incomingAccess.VPN {
+ continue
+ }
+ if technicalAsset.Confidentiality >= types.Confidential || technicalAsset.Integrity >= types.Critical {
+ sourceAsset := input.TechnicalAssets[incomingAccess.SourceId]
+ if sourceAsset.Internet {
+ highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical
+ risks = append(risks, r.createRisk(technicalAsset, incomingAccess,
+ input.TechnicalAssets[incomingAccess.SourceId], highRisk))
+ }
+ }
+ }
+ }
+ }
+ }
+ return risks
+}
+
+func (r *UnguardedAccessFromInternetRule) createRisk(dataStore types.TechnicalAsset, dataFlow types.CommunicationLink,
+ clientFromInternet types.TechnicalAsset, moreRisky bool) types.Risk {
+ impact := types.LowImpact
+ if moreRisky || dataStore.RAA > 40 {
+ impact = types.MediumImpact
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.VeryLikely, impact),
+ ExploitationLikelihood: types.VeryLikely,
+ ExploitationImpact: impact,
+ Title: "Unguarded Access from Internet of " + dataStore.Title + " by " +
+ clientFromInternet.Title + "" + " via " + dataFlow.Title + "",
+ MostRelevantTechnicalAssetId: dataStore.Id,
+ MostRelevantCommunicationLinkId: dataFlow.Id,
+ DataBreachProbability: types.Possible,
+ DataBreachTechnicalAssetIDs: []string{dataStore.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + dataStore.Id + "@" + clientFromInternet.Id + "@" + dataFlow.Id
+ return risk
+}
diff --git a/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go b/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go
new file mode 100644
index 00000000..42e1d600
--- /dev/null
+++ b/pkg/security/risks/builtin/unguarded-direct-datastore-access-rule.go
@@ -0,0 +1,121 @@
+package builtin
+
+import (
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+// UnguardedDirectDatastoreAccessRule flags data stores that are accessed directly
+// across a network trust boundary without a guarding service or application in-between.
+type UnguardedDirectDatastoreAccessRule struct{}
+
+// NewUnguardedDirectDatastoreAccessRule returns a new instance of the rule.
+func NewUnguardedDirectDatastoreAccessRule() *UnguardedDirectDatastoreAccessRule {
+ return &UnguardedDirectDatastoreAccessRule{}
+}
+
+// Category describes the unguarded-direct-datastore-access risk: data stores
+// reachable across trust boundaries must be guarded by a protecting service.
+func (*UnguardedDirectDatastoreAccessRule) Category() types.RiskCategory {
+ return types.RiskCategory{
+ Id: "unguarded-direct-datastore-access",
+ Title: "Unguarded Direct Datastore Access",
+ Description: "Data stores accessed across trust boundaries must be guarded by some protecting service or application.",
+ Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive data stores without any protecting components in-between.",
+ ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
+ CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ Action: "Encapsulation of Datastore",
+ Mitigation: "Encapsulate the datastore access behind a guarding service or application.",
+ Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
+ // GenerateRisks also skips types.IdentityStoreDatabase when accessed from an identity
+ // provider, so it is listed here to keep the description in sync with the code.
+ DetectionLogic: "In-scope technical assets of type " + types.Datastore.String() + " (except " + types.IdentityStoreLDAP.String() + " and " + types.IdentityStoreDatabase.String() + " when accessed from " + types.IdentityProvider.String() + " and " + types.FileServer.String() + " when accessed via file transfer protocols) with confidentiality rating " +
+ "of " + types.Confidential.String() + " (or higher) or with integrity rating of " + types.Critical.String() + " (or higher) " +
+ "which have incoming data-flows from assets outside across a network trust-boundary. DevOps config and deployment access is excluded from this risk.", // TODO new rule "missing bastion host"?
+ RiskAssessment: "The matching technical assets are at " + types.LowSeverity.String() + " risk. When either the " +
+ "confidentiality rating is " + types.StrictlyConfidential.String() + " or the integrity rating " +
+ "is " + types.MissionCritical.String() + ", the risk-rating is considered " + types.MediumSeverity.String() + ". " +
+ "For assets with RAA values higher than 40 % the risk-rating increases.",
+ FalsePositives: "When the caller is considered fully trusted as if it was part of the datastore itself.",
+ ModelFailurePossibleReason: false,
+ CWE: 501,
+ }
+}
+
+// SupportedTags returns the model tags this rule reacts to; this rule is tag-agnostic.
+func (*UnguardedDirectDatastoreAccessRule) SupportedTags() []string {
+ return []string{}
+}
+
+// check for data stores that should not be accessed directly across trust boundaries
+
+// GenerateRisks flags in-scope datastore assets with confidential (or higher) data or
+// critical (or higher) integrity that receive data-flows from outside their (parent)
+// trust boundary. Excluded: identity stores accessed by an identity provider, file
+// servers reached via file transfer protocols, and DevOps-only usage.
+func (r *UnguardedDirectDatastoreAccessRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
+ if !technicalAsset.OutOfScope && technicalAsset.Type == types.Datastore {
+ for _, incomingAccess := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
+ sourceAsset := input.TechnicalAssets[incomingAccess.SourceId]
+ // identity stores are expected to be accessed directly by identity providers
+ if (technicalAsset.Technology == types.IdentityStoreLDAP || technicalAsset.Technology == types.IdentityStoreDatabase) &&
+ sourceAsset.Technology == types.IdentityProvider {
+ continue
+ }
+ if technicalAsset.Confidentiality >= types.Confidential || technicalAsset.Integrity >= types.Critical {
+ if incomingAccess.IsAcrossTrustBoundaryNetworkOnly(input) && !fileServerAccessViaFTP(technicalAsset, incomingAccess) &&
+ incomingAccess.Usage != types.DevOps && !isSharingSameParentTrustBoundary(input, technicalAsset, sourceAsset) {
+ highRisk := technicalAsset.Confidentiality == types.StrictlyConfidential ||
+ technicalAsset.Integrity == types.MissionCritical
+ // reuse sourceAsset instead of a second lookup of the same map key
+ risks = append(risks, r.createRisk(technicalAsset, incomingAccess, sourceAsset, highRisk))
+ }
+ }
+ }
+ }
+ }
+ return risks
+}
+
+// isSharingSameParentTrustBoundary reports whether both assets are in the same trust
+// boundary or in boundaries nested under a common parent boundary. Two assets that
+// are both outside of any trust boundary are treated as sharing one.
+func isSharingSameParentTrustBoundary(input *types.ParsedModel, left, right types.TechnicalAsset) bool {
+ tbIDLeft, tbIDRight := left.GetTrustBoundaryId(input), right.GetTrustBoundaryId(input)
+ // exactly one of the two assets has no trust boundary -> not sharing
+ if len(tbIDLeft) == 0 && len(tbIDRight) > 0 {
+ return false
+ }
+ if len(tbIDLeft) > 0 && len(tbIDRight) == 0 {
+ return false
+ }
+ // neither asset is inside any trust boundary
+ if len(tbIDLeft) == 0 && len(tbIDRight) == 0 {
+ return true
+ }
+ if tbIDLeft == tbIDRight {
+ return true
+ }
+ // different boundaries: look for any shared ancestor boundary
+ tbLeft, tbRight := input.TrustBoundaries[tbIDLeft], input.TrustBoundaries[tbIDRight]
+ tbParentsLeft, tbParentsRight := tbLeft.AllParentTrustBoundaryIDs(input), tbRight.AllParentTrustBoundaryIDs(input)
+ for _, parentLeft := range tbParentsLeft {
+ for _, parentRight := range tbParentsRight {
+ if parentLeft == parentRight {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// fileServerAccessViaFTP reports whether the incoming access targets a file server
+// via a file transfer protocol (FTP, FTPS, or SFTP), which this rule tolerates.
+func fileServerAccessViaFTP(technicalAsset types.TechnicalAsset, incomingAccess types.CommunicationLink) bool {
+ return technicalAsset.Technology == types.FileServer &&
+ (incomingAccess.Protocol == types.FTP || incomingAccess.Protocol == types.FTPS || incomingAccess.Protocol == types.SFTP)
+}
+
+// createRisk builds a Risk entry for an unguarded direct datastore access. The impact
+// is raised from Low to Medium when the caller flagged the access as more risky or the
+// datastore's Relative Attacker Attractiveness (RAA) exceeds 40.
+func (r *UnguardedDirectDatastoreAccessRule) createRisk(dataStore types.TechnicalAsset, dataFlow types.CommunicationLink, clientOutsideTrustBoundary types.TechnicalAsset, moreRisky bool) types.Risk {
+ impact := types.LowImpact
+ if moreRisky || dataStore.RAA > 40 {
+ impact = types.MediumImpact
+ }
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Likely, impact),
+ ExploitationLikelihood: types.Likely,
+ ExploitationImpact: impact,
+ // redundant trailing + "" removed; title is unchanged
+ Title: "Unguarded Direct Datastore Access of " + dataStore.Title + " by " +
+ clientOutsideTrustBoundary.Title + " via " + dataFlow.Title,
+ MostRelevantTechnicalAssetId: dataStore.Id,
+ MostRelevantCommunicationLinkId: dataFlow.Id,
+ DataBreachProbability: types.Improbable,
+ DataBreachTechnicalAssetIDs: []string{dataStore.Id},
+ }
+ risk.SyntheticId = risk.CategoryId + "@" + dataFlow.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataStore.Id
+ return risk
+}
diff --git a/risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go b/pkg/security/risks/builtin/unnecessary-communication-link-rule.go
similarity index 54%
rename from risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go
rename to pkg/security/risks/builtin/unnecessary-communication-link-rule.go
index 80843a32..66ed716b 100644
--- a/risks/built-in/unnecessary-communication-link/unnecessary-communication-link-rule.go
+++ b/pkg/security/risks/builtin/unnecessary-communication-link-rule.go
@@ -1,11 +1,17 @@
-package unnecessary_communication_link
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UnnecessaryCommunicationLinkRule struct{}
+
+func NewUnnecessaryCommunicationLinkRule() *UnnecessaryCommunicationLinkRule {
+ return &UnnecessaryCommunicationLinkRule{}
+}
+
+func (*UnnecessaryCommunicationLinkRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unnecessary-communication-link",
Title: "Unnecessary Communication Link",
Description: "When a technical communication link does not send or receive any data assets, this is " +
@@ -16,28 +22,28 @@ func Category() model.RiskCategory {
Action: "Attack Surface Reduction",
Mitigation: "Try to avoid using technical communication links that do not send or receive anything.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "In-scope technical assets' technical communication links not sending or receiving any data assets.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*UnnecessaryCommunicationLinkRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *UnnecessaryCommunicationLinkRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
for _, commLink := range technicalAsset.CommunicationLinks {
if len(commLink.DataAssetsSent) == 0 && len(commLink.DataAssetsReceived) == 0 {
- if !technicalAsset.OutOfScope || !model.ParsedModelRoot.TechnicalAssets[commLink.TargetId].OutOfScope {
- risks = append(risks, createRisk(technicalAsset, commLink))
+ if !technicalAsset.OutOfScope || !input.TechnicalAssets[commLink.TargetId].OutOfScope {
+ risks = append(risks, r.createRisk(technicalAsset, commLink))
}
}
}
@@ -45,19 +51,19 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink) model.Risk {
+func (r *UnnecessaryCommunicationLinkRule) createRisk(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink) types.Risk {
title := "Unnecessary Communication Link titled " + commLink.Title + " at technical asset " + technicalAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: commLink.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + commLink.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + commLink.Id + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go b/pkg/security/risks/builtin/unnecessary-data-asset-rule.go
similarity index 58%
rename from risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go
rename to pkg/security/risks/builtin/unnecessary-data-asset-rule.go
index 2af7c618..dcb6cc3d 100644
--- a/risks/built-in/unnecessary-data-asset/unnecessary-data-asset-rule.go
+++ b/pkg/security/risks/builtin/unnecessary-data-asset-rule.go
@@ -1,15 +1,22 @@
-package unnecessary_data_asset
+package builtin
import (
- "github.com/threagile/threagile/model"
"sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UnnecessaryDataAssetRule struct{}
+
+func NewUnnecessaryDataAssetRule() *UnnecessaryDataAssetRule {
+ return &UnnecessaryDataAssetRule{}
+}
+
+func (*UnnecessaryDataAssetRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unnecessary-data-asset",
Title: "Unnecessary Data Asset",
- Description: "When a data asset is not processed or stored by any data assets and also not transferred by any " +
+ Description: "When a data asset is not processed by any data assets and also not transferred by any " +
"communication links, this is an indicator for an unnecessary data asset (or for an incomplete model).",
Impact: "If this risk is unmitigated, attackers might be able to access unnecessary data assets using " +
"other vulnerabilities.",
@@ -18,30 +25,30 @@ func Category() model.RiskCategory {
Action: "Attack Surface Reduction",
Mitigation: "Try to avoid having data assets that are not required/used.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "Modelled data assets not processed or stored by any data assets and also not transferred by any " +
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
+ DetectionLogic: "Modelled data assets not processed by any data assets and also not transferred by any " +
"communication links.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*UnnecessaryDataAssetRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
+func (r *UnnecessaryDataAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
// first create them in memory - otherwise in Go ranging over map is random order
// range over them in sorted (hence re-producible) way:
unusedDataAssetIDs := make(map[string]bool)
- for k := range model.ParsedModelRoot.DataAssets {
+ for k := range input.DataAssets {
unusedDataAssetIDs[k] = true
}
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
+ for _, technicalAsset := range input.TechnicalAssets {
for _, processedDataAssetID := range technicalAsset.DataAssetsProcessed {
delete(unusedDataAssetIDs, processedDataAssetID)
}
@@ -63,24 +70,24 @@ func GenerateRisks() []model.Risk {
}
sort.Strings(keys)
for _, unusedDataAssetID := range keys {
- risks = append(risks, createRisk(unusedDataAssetID))
+ risks = append(risks, r.createRisk(input, unusedDataAssetID))
}
return risks
}
-func createRisk(unusedDataAssetID string) model.Risk {
- unusedDataAsset := model.ParsedModelRoot.DataAssets[unusedDataAssetID]
+func (r *UnnecessaryDataAssetRule) createRisk(input *types.ParsedModel, unusedDataAssetID string) types.Risk {
+ unusedDataAsset := input.DataAssets[unusedDataAssetID]
title := "Unnecessary Data Asset named " + unusedDataAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantDataAssetId: unusedDataAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{unusedDataAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + unusedDataAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + unusedDataAsset.Id
return risk
}
diff --git a/risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go b/pkg/security/risks/builtin/unnecessary-data-transfer-rule.go
similarity index 56%
rename from risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go
rename to pkg/security/risks/builtin/unnecessary-data-transfer-rule.go
index f33c58a0..51a36a48 100644
--- a/risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go
+++ b/pkg/security/risks/builtin/unnecessary-data-transfer-rule.go
@@ -1,12 +1,19 @@
-package unnecessary_data_transfer
+package builtin
import (
- "github.com/threagile/threagile/model"
"sort"
+
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UnnecessaryDataTransferRule struct{}
+
+func NewUnnecessaryDataTransferRule() *UnnecessaryDataTransferRule {
+ return &UnnecessaryDataTransferRule{}
+}
+
+func (*UnnecessaryDataTransferRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unnecessary-data-transfer",
Title: "Unnecessary Data Transfer",
Description: "When a technical asset sends or receives data assets, which it neither processes or stores this is " +
@@ -17,69 +24,69 @@ func Category() model.RiskCategory {
CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
Action: "Attack Surface Reduction",
Mitigation: "Try to avoid sending or receiving sensitive data assets which are not required (i.e. neither " +
- "processed or stored) by the involved technical asset.",
+ "processed) by the involved technical asset.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "In-scope technical assets sending or receiving sensitive data assets which are neither processed nor " +
"stored by the technical asset are flagged with this risk. The risk rating (low or medium) depends on the " +
"confidentiality, integrity, and availability rating of the technical asset. Monitoring data is exempted from this risk.",
RiskAssessment: "The risk assessment is depending on the confidentiality and integrity rating of the transferred data asset " +
- "either " + model.LowSeverity.String() + " or " + model.MediumSeverity.String() + ".",
+ "either " + types.LowSeverity.String() + " or " + types.MediumSeverity.String() + ".",
FalsePositives: "Technical assets missing the model entries of either processing or storing the mentioned data assets " +
"can be considered as false positives (incomplete models) after individual review. These should then be addressed by " +
- "completing the model so that all necessary data assets are processed and/or stored by the technical asset involved.",
+ "completing the model so that all necessary data assets are processed by the technical asset involved.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*UnnecessaryDataTransferRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *UnnecessaryDataTransferRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.OutOfScope {
continue
}
// outgoing data flows
for _, outgoingDataFlow := range technicalAsset.CommunicationLinks {
- targetAsset := model.ParsedModelRoot.TechnicalAssets[outgoingDataFlow.TargetId]
+ targetAsset := input.TechnicalAssets[outgoingDataFlow.TargetId]
if targetAsset.Technology.IsUnnecessaryDataTolerated() {
continue
}
- risks = checkRisksAgainstTechnicalAsset(risks, technicalAsset, outgoingDataFlow, false)
+ risks = r.checkRisksAgainstTechnicalAsset(input, risks, technicalAsset, outgoingDataFlow, false)
}
// incoming data flows
- commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- sort.Sort(model.ByTechnicalCommunicationLinkIdSort(commLinks))
+ commLinks := input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
+ sort.Sort(types.ByTechnicalCommunicationLinkIdSort(commLinks))
for _, incomingDataFlow := range commLinks {
- targetAsset := model.ParsedModelRoot.TechnicalAssets[incomingDataFlow.SourceId]
+ targetAsset := input.TechnicalAssets[incomingDataFlow.SourceId]
if targetAsset.Technology.IsUnnecessaryDataTolerated() {
continue
}
- risks = checkRisksAgainstTechnicalAsset(risks, technicalAsset, incomingDataFlow, true)
+ risks = r.checkRisksAgainstTechnicalAsset(input, risks, technicalAsset, incomingDataFlow, true)
}
}
return risks
}
-func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.TechnicalAsset,
- dataFlow model.CommunicationLink, inverseDirection bool) []model.Risk {
+func (r *UnnecessaryDataTransferRule) checkRisksAgainstTechnicalAsset(input *types.ParsedModel, risks []types.Risk, technicalAsset types.TechnicalAsset,
+ dataFlow types.CommunicationLink, inverseDirection bool) []types.Risk {
for _, transferredDataAssetId := range dataFlow.DataAssetsSent {
if !technicalAsset.ProcessesOrStoresDataAsset(transferredDataAssetId) {
- transferredDataAsset := model.ParsedModelRoot.DataAssets[transferredDataAssetId]
+ transferredDataAsset := input.DataAssets[transferredDataAssetId]
//fmt.Print("--->>> Checking "+technicalAsset.Id+": "+transferredDataAsset.Id+" sent via "+dataFlow.Id+"\n")
- if transferredDataAsset.Confidentiality >= model.Confidential || transferredDataAsset.Integrity >= model.Critical {
+ if transferredDataAsset.Confidentiality >= types.Confidential || transferredDataAsset.Integrity >= types.Critical {
commPartnerId := dataFlow.TargetId
if inverseDirection {
commPartnerId = dataFlow.SourceId
}
- commPartnerAsset := model.ParsedModelRoot.TechnicalAssets[commPartnerId]
- risk := createRisk(technicalAsset, transferredDataAsset, commPartnerAsset)
+ commPartnerAsset := input.TechnicalAssets[commPartnerId]
+ risk := r.createRisk(technicalAsset, transferredDataAsset, commPartnerAsset)
if isNewRisk(risks, risk) {
risks = append(risks, risk)
}
@@ -88,15 +95,15 @@ func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.Te
}
for _, transferredDataAssetId := range dataFlow.DataAssetsReceived {
if !technicalAsset.ProcessesOrStoresDataAsset(transferredDataAssetId) {
- transferredDataAsset := model.ParsedModelRoot.DataAssets[transferredDataAssetId]
+ transferredDataAsset := input.DataAssets[transferredDataAssetId]
//fmt.Print("--->>> Checking "+technicalAsset.Id+": "+transferredDataAsset.Id+" received via "+dataFlow.Id+"\n")
- if transferredDataAsset.Confidentiality >= model.Confidential || transferredDataAsset.Integrity >= model.Critical {
+ if transferredDataAsset.Confidentiality >= types.Confidential || transferredDataAsset.Integrity >= types.Critical {
commPartnerId := dataFlow.TargetId
if inverseDirection {
commPartnerId = dataFlow.SourceId
}
- commPartnerAsset := model.ParsedModelRoot.TechnicalAssets[commPartnerId]
- risk := createRisk(technicalAsset, transferredDataAsset, commPartnerAsset)
+ commPartnerAsset := input.TechnicalAssets[commPartnerId]
+ risk := r.createRisk(technicalAsset, transferredDataAsset, commPartnerAsset)
if isNewRisk(risks, risk) {
risks = append(risks, risk)
}
@@ -106,7 +113,7 @@ func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.Te
return risks
}
-func isNewRisk(risks []model.Risk, risk model.Risk) bool {
+func isNewRisk(risks []types.Risk, risk types.Risk) bool {
for _, check := range risks {
if check.SyntheticId == risk.SyntheticId {
return false
@@ -115,27 +122,27 @@ func isNewRisk(risks []model.Risk, risk model.Risk) bool {
return true
}
-func createRisk(technicalAsset model.TechnicalAsset, dataAssetTransferred model.DataAsset, commPartnerAsset model.TechnicalAsset) model.Risk {
- moreRisky := dataAssetTransferred.Confidentiality == model.StrictlyConfidential || dataAssetTransferred.Integrity == model.MissionCritical
+func (r *UnnecessaryDataTransferRule) createRisk(technicalAsset types.TechnicalAsset, dataAssetTransferred types.DataAsset, commPartnerAsset types.TechnicalAsset) types.Risk {
+ moreRisky := dataAssetTransferred.Confidentiality == types.StrictlyConfidential || dataAssetTransferred.Integrity == types.MissionCritical
- impact := model.LowImpact
+ impact := types.LowImpact
if moreRisky {
- impact = model.MediumImpact
+ impact = types.MediumImpact
}
title := "Unnecessary Data Transfer of " + dataAssetTransferred.Title + " data at " + technicalAsset.Title + " " +
"from/to " + commPartnerAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, impact),
+ ExploitationLikelihood: types.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantDataAssetId: dataAssetTransferred.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + dataAssetTransferred.Id + "@" + technicalAsset.Id + "@" + commPartnerAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + dataAssetTransferred.Id + "@" + technicalAsset.Id + "@" + commPartnerAsset.Id
return risk
}
diff --git a/risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go b/pkg/security/risks/builtin/unnecessary-technical-asset-rule.go
similarity index 53%
rename from risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go
rename to pkg/security/risks/builtin/unnecessary-technical-asset-rule.go
index 012117e8..c075407e 100644
--- a/risks/built-in/unnecessary-technical-asset/unnecessary-technical-asset-rule.go
+++ b/pkg/security/risks/builtin/unnecessary-technical-asset-rule.go
@@ -1,14 +1,20 @@
-package unnecessary_technical_asset
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UnnecessaryTechnicalAssetRule struct{}
+
+func NewUnnecessaryTechnicalAssetRule() *UnnecessaryTechnicalAssetRule {
+ return &UnnecessaryTechnicalAssetRule{}
+}
+
+func (*UnnecessaryTechnicalAssetRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "unnecessary-technical-asset",
Title: "Unnecessary Technical Asset",
- Description: "When a technical asset does not process or store any data assets, this is " +
+ Description: "When a technical asset does not process any data assets, this is " +
"an indicator for an unnecessary technical asset (or for an incomplete model). " +
"This is also the case if the asset has no communication links (either outgoing or incoming).",
Impact: "If this risk is unmitigated, attackers might be able to target unnecessary technical assets.",
@@ -17,44 +23,44 @@ func Category() model.RiskCategory {
Action: "Attack Surface Reduction",
Mitigation: "Try to avoid using technical assets that do not process or store anything.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "Technical assets not processing or storing any data assets.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*UnnecessaryTechnicalAssetRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *UnnecessaryTechnicalAssetRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if len(technicalAsset.DataAssetsProcessed) == 0 && len(technicalAsset.DataAssetsStored) == 0 ||
- (len(technicalAsset.CommunicationLinks) == 0 && len(model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]) == 0) {
- risks = append(risks, createRisk(technicalAsset))
+ (len(technicalAsset.CommunicationLinks) == 0 && len(input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]) == 0) {
+ risks = append(risks, r.createRisk(technicalAsset))
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *UnnecessaryTechnicalAssetRule) createRisk(technicalAsset types.TechnicalAsset) types.Risk {
title := "Unnecessary Technical Asset named " + technicalAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go b/pkg/security/risks/builtin/untrusted-deserialization-rule.go
similarity index 59%
rename from risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go
rename to pkg/security/risks/builtin/untrusted-deserialization-rule.go
index 5da7f2db..d6e7aeb7 100644
--- a/risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go
+++ b/pkg/security/risks/builtin/untrusted-deserialization-rule.go
@@ -1,11 +1,17 @@
-package untrusted_deserialization
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type UntrustedDeserializationRule struct{}
+
+func NewUntrustedDeserializationRule() *UntrustedDeserializationRule {
+ return &UntrustedDeserializationRule{}
+}
+
+func (*UntrustedDeserializationRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "untrusted-deserialization",
Title: "Untrusted Deserialization",
Description: "When a technical asset accepts data in a specific serialized form (like Java or .NET serialization), " +
@@ -21,10 +27,10 @@ func Category() model.RiskCategory {
"Alternatively a strict whitelisting approach of the classes/types/values to deserialize might help as well. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Tampering,
+ Function: types.Architecture,
+ STRIDE: types.Tampering,
DetectionLogic: "In-scope technical assets accepting serialization data formats (including EJB and RMI protocols).",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed.",
FalsePositives: "Fully trusted (i.e. cryptographically signed or similar) data deserialized can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
@@ -32,68 +38,68 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*UntrustedDeserializationRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *UntrustedDeserializationRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.OutOfScope {
continue
}
hasOne, acrossTrustBoundary := false, false
commLinkTitle := ""
for _, format := range technicalAsset.DataFormatsAccepted {
- if format == model.Serialization {
+ if format == types.Serialization {
hasOne = true
}
}
- if technicalAsset.Technology == model.EJB {
+ if technicalAsset.Technology == types.EJB {
hasOne = true
}
// check for any incoming IIOP and JRMP protocols
- for _, commLink := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
- if commLink.Protocol == model.IIOP || commLink.Protocol == model.IIOP_encrypted ||
- commLink.Protocol == model.JRMP || commLink.Protocol == model.JRMP_encrypted {
+ for _, commLink := range input.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
+ if commLink.Protocol == types.IIOP || commLink.Protocol == types.IiopEncrypted ||
+ commLink.Protocol == types.JRMP || commLink.Protocol == types.JrmpEncrypted {
hasOne = true
- if commLink.IsAcrossTrustBoundaryNetworkOnly() {
+ if commLink.IsAcrossTrustBoundaryNetworkOnly(input) {
acrossTrustBoundary = true
commLinkTitle = commLink.Title
}
}
}
if hasOne {
- risks = append(risks, createRisk(technicalAsset, acrossTrustBoundary, commLinkTitle))
+ risks = append(risks, r.createRisk(input, technicalAsset, acrossTrustBoundary, commLinkTitle))
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, acrossTrustBoundary bool, commLinkTitle string) model.Risk {
+func (r *UntrustedDeserializationRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset, acrossTrustBoundary bool, commLinkTitle string) types.Risk {
title := "Untrusted Deserialization risk at " + technicalAsset.Title + ""
- impact := model.HighImpact
- likelihood := model.Likely
+ impact := types.HighImpact
+ likelihood := types.Likely
if acrossTrustBoundary {
- likelihood = model.VeryLikely
+ likelihood = types.VeryLikely
title += " across a trust boundary (at least via communication link " + commLinkTitle + ")"
}
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.VeryHighImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.VeryHighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go b/pkg/security/risks/builtin/wrong-communication-link-content-rule.go
similarity index 50%
rename from risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go
rename to pkg/security/risks/builtin/wrong-communication-link-content-rule.go
index 3464e490..7f4997c7 100644
--- a/risks/built-in/wrong-communication-link-content/wrong-communication-link-content-rule.go
+++ b/pkg/security/risks/builtin/wrong-communication-link-content-rule.go
@@ -1,11 +1,17 @@
-package wrong_communication_link_content
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type WrongCommunicationLinkContentRule struct{}
+
+func NewWrongCommunicationLinkContentRule() *WrongCommunicationLinkContentRule {
+ return &WrongCommunicationLinkContentRule{}
+}
+
+func (*WrongCommunicationLinkContentRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "wrong-communication-link-content",
Title: "Wrong Communication Link Content",
Description: "When a communication link is defined as readonly, but does not receive any data asset, " +
@@ -17,69 +23,69 @@ func Category() model.RiskCategory {
Mitigation: "Try to model the correct readonly flag and/or data sent/received of communication links. " +
"Also try to use communication link types matching the target technology/machine types.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.InformationDisclosure,
+ Function: types.Architecture,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "Communication links with inconsistent data assets being sent/received not matching their readonly flag or otherwise inconsistent protocols not matching the target technology type.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*WrongCommunicationLinkContentRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, techAsset := range model.ParsedModelRoot.TechnicalAssets {
+func (r *WrongCommunicationLinkContentRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, techAsset := range input.TechnicalAssets {
for _, commLink := range techAsset.CommunicationLinks {
// check readonly consistency
if commLink.Readonly {
if len(commLink.DataAssetsReceived) == 0 {
- risks = append(risks, createRisk(techAsset, commLink,
+ risks = append(risks, r.createRisk(techAsset, commLink,
"(data assets sent/received not matching the communication link's readonly flag)"))
}
} else {
if len(commLink.DataAssetsSent) == 0 {
- risks = append(risks, createRisk(techAsset, commLink,
+ risks = append(risks, r.createRisk(techAsset, commLink,
"(data assets sent/received not matching the communication link's readonly flag)"))
}
}
// check for protocol inconsistencies
- targetAsset := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId]
- if commLink.Protocol == model.InProcessLibraryCall && targetAsset.Technology != model.Library {
- risks = append(risks, createRisk(techAsset, commLink,
- "(protocol type \""+model.InProcessLibraryCall.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+model.Library.String()+"\")"))
+ targetAsset := input.TechnicalAssets[commLink.TargetId]
+ if commLink.Protocol == types.InProcessLibraryCall && targetAsset.Technology != types.Library {
+ risks = append(risks, r.createRisk(techAsset, commLink,
+ "(protocol type \""+types.InProcessLibraryCall.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+types.Library.String()+"\")"))
}
- if commLink.Protocol == model.LocalFileAccess && targetAsset.Technology != model.LocalFileSystem {
- risks = append(risks, createRisk(techAsset, commLink,
- "(protocol type \""+model.LocalFileAccess.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+model.LocalFileSystem.String()+"\")"))
+ if commLink.Protocol == types.LocalFileAccess && targetAsset.Technology != types.LocalFileSystem {
+ risks = append(risks, r.createRisk(techAsset, commLink,
+ "(protocol type \""+types.LocalFileAccess.String()+"\" does not match target technology type \""+targetAsset.Technology.String()+"\": expected \""+types.LocalFileSystem.String()+"\")"))
}
- if commLink.Protocol == model.ContainerSpawning && targetAsset.Machine != model.Container {
- risks = append(risks, createRisk(techAsset, commLink,
- "(protocol type \""+model.ContainerSpawning.String()+"\" does not match target machine type \""+targetAsset.Machine.String()+"\": expected \""+model.Container.String()+"\")"))
+ if commLink.Protocol == types.ContainerSpawning && targetAsset.Machine != types.Container {
+ risks = append(risks, r.createRisk(techAsset, commLink,
+ "(protocol type \""+types.ContainerSpawning.String()+"\" does not match target machine type \""+targetAsset.Machine.String()+"\": expected \""+types.Container.String()+"\")"))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset, commLink model.CommunicationLink, reason string) model.Risk {
+func (r *WrongCommunicationLinkContentRule) createRisk(technicalAsset types.TechnicalAsset, commLink types.CommunicationLink, reason string) types.Risk {
title := "Wrong Communication Link Content " + reason + " at " + technicalAsset.Title + " " +
"regarding communication link " + commLink.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantCommunicationLinkId: commLink.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id + "@" + commLink.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id + "@" + commLink.Id
return risk
}
diff --git a/risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go b/pkg/security/risks/builtin/wrong-trust-boundary-content.go
similarity index 50%
rename from risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go
rename to pkg/security/risks/builtin/wrong-trust-boundary-content.go
index 95801845..81959f8a 100644
--- a/risks/built-in/wrong-trust-boundary-content/wrong-trust-boundary-content.go
+++ b/pkg/security/risks/builtin/wrong-trust-boundary-content.go
@@ -1,14 +1,20 @@
-package wrong_trust_boundary_content
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type WrongTrustBoundaryContentRule struct{}
+
+func NewWrongTrustBoundaryContentRule() *WrongTrustBoundaryContentRule {
+ return &WrongTrustBoundaryContentRule{}
+}
+
+func (*WrongTrustBoundaryContentRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "wrong-trust-boundary-content",
Title: "Wrong Trust Boundary Content",
- Description: "When a trust boundary of type " + model.NetworkPolicyNamespaceIsolation.String() + " contains " +
+ Description: "When a trust boundary of type " + types.NetworkPolicyNamespaceIsolation.String() + " contains " +
"non-container assets it is likely to be a model failure.",
Impact: "If this potential model error is not fixed, some risks might not be visible.",
ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
@@ -16,28 +22,28 @@ func Category() model.RiskCategory {
Action: "Model Consistency",
Mitigation: "Try to model the correct types of trust boundaries and data assets.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
+ Function: types.Architecture,
+ STRIDE: types.ElevationOfPrivilege,
DetectionLogic: "Trust boundaries which should only contain containers, but have different assets inside.",
- RiskAssessment: model.LowSeverity.String(),
+ RiskAssessment: types.LowSeverity.String(),
FalsePositives: "Usually no false positives as this looks like an incomplete model.",
ModelFailurePossibleReason: true,
CWE: 1008,
}
}
-func SupportedTags() []string {
+func (*WrongTrustBoundaryContentRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries {
- if trustBoundary.Type == model.NetworkPolicyNamespaceIsolation {
+func (r *WrongTrustBoundaryContentRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, trustBoundary := range input.TrustBoundaries {
+ if trustBoundary.Type == types.NetworkPolicyNamespaceIsolation {
for _, techAssetID := range trustBoundary.TechnicalAssetsInside {
- techAsset := model.ParsedModelRoot.TechnicalAssets[techAssetID]
- if techAsset.Machine != model.Container && techAsset.Machine != model.Serverless {
- risks = append(risks, createRisk(techAsset))
+ techAsset := input.TechnicalAssets[techAssetID]
+ if techAsset.Machine != types.Container && techAsset.Machine != types.Serverless {
+ risks = append(risks, r.createRisk(techAsset))
}
}
}
@@ -45,18 +51,18 @@ func GenerateRisks() []model.Risk {
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *WrongTrustBoundaryContentRule) createRisk(technicalAsset types.TechnicalAsset) types.Risk {
title := "Wrong Trust Boundary Content (non-container asset inside container trust boundary) at " + technicalAsset.Title + ""
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, model.LowImpact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: model.LowImpact,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.Unlikely, types.LowImpact),
+ ExploitationLikelihood: types.Unlikely,
+ ExploitationImpact: types.LowImpact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
+ DataBreachProbability: types.Improbable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/risks/built-in/xml-external-entity/xml-external-entity-rule.go b/pkg/security/risks/builtin/xml-external-entity-rule.go
similarity index 59%
rename from risks/built-in/xml-external-entity/xml-external-entity-rule.go
rename to pkg/security/risks/builtin/xml-external-entity-rule.go
index e6e4778a..e548ea19 100644
--- a/risks/built-in/xml-external-entity/xml-external-entity-rule.go
+++ b/pkg/security/risks/builtin/xml-external-entity-rule.go
@@ -1,11 +1,17 @@
-package xml_external_entity
+package builtin
import (
- "github.com/threagile/threagile/model"
+ "github.com/threagile/threagile/pkg/security/types"
)
-func Category() model.RiskCategory {
- return model.RiskCategory{
+type XmlExternalEntityRule struct{}
+
+func NewXmlExternalEntityRule() *XmlExternalEntityRule {
+ return &XmlExternalEntityRule{}
+}
+
+func (*XmlExternalEntityRule) Category() types.RiskCategory {
+ return types.RiskCategory{
Id: "xml-external-entity",
Title: "XML External Entity (XXE)",
Description: "When a technical asset accepts data in XML format, XML External Entity (XXE) risks might arise.",
@@ -18,10 +24,10 @@ func Category() model.RiskCategory {
Mitigation: "Apply hardening of all XML parser instances in order to stay safe from XML External Entity (XXE) vulnerabilities. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Development,
- STRIDE: model.InformationDisclosure,
+ Function: types.Development,
+ STRIDE: types.InformationDisclosure,
DetectionLogic: "In-scope technical assets accepting XML data formats.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored. " +
+ RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed. " +
"Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF (and XXE vulnerabilities are often also SSRF vulnerabilities).",
FalsePositives: "Fully trusted (i.e. cryptographically signed or similar) XML data can be considered " +
"as false positives after individual review.",
@@ -30,44 +36,44 @@ func Category() model.RiskCategory {
}
}
-func SupportedTags() []string {
+func (*XmlExternalEntityRule) SupportedTags() []string {
return []string{}
}
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
+func (r *XmlExternalEntityRule) GenerateRisks(input *types.ParsedModel) []types.Risk {
+ risks := make([]types.Risk, 0)
+ for _, id := range input.SortedTechnicalAssetIDs() {
+ technicalAsset := input.TechnicalAssets[id]
if technicalAsset.OutOfScope {
continue
}
for _, format := range technicalAsset.DataFormatsAccepted {
- if format == model.XML {
- risks = append(risks, createRisk(technicalAsset))
+ if format == types.XML {
+ risks = append(risks, r.createRisk(input, technicalAsset))
}
}
}
return risks
}
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
+func (r *XmlExternalEntityRule) createRisk(parsedModel *types.ParsedModel, technicalAsset types.TechnicalAsset) types.Risk {
title := "XML External Entity (XXE) risk at " + technicalAsset.Title + ""
- impact := model.MediumImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical ||
- technicalAsset.HighestAvailability() == model.MissionCritical {
- impact = model.HighImpact
+ impact := types.MediumImpact
+ if technicalAsset.HighestConfidentiality(parsedModel) == types.StrictlyConfidential ||
+ technicalAsset.HighestIntegrity(parsedModel) == types.MissionCritical ||
+ technicalAsset.HighestAvailability(parsedModel) == types.MissionCritical {
+ impact = types.HighImpact
}
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.VeryLikely, impact),
- ExploitationLikelihood: model.VeryLikely,
+ risk := types.Risk{
+ CategoryId: r.Category().Id,
+ Severity: types.CalculateSeverity(types.VeryLikely, impact),
+ ExploitationLikelihood: types.VeryLikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Probable,
+ DataBreachProbability: types.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: use the same logic here as for SSRF rule, as XXE is also SSRF ;)
}
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
+ risk.SyntheticId = risk.CategoryId + "@" + technicalAsset.Id
return risk
}
diff --git a/pkg/security/risks/risks.go b/pkg/security/risks/risks.go
new file mode 100644
index 00000000..33fae818
--- /dev/null
+++ b/pkg/security/risks/risks.go
@@ -0,0 +1,59 @@
+package risks
+
+import (
+ "github.com/threagile/threagile/pkg/security/risks/builtin"
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
+type RiskRule interface {
+ Category() types.RiskCategory
+ SupportedTags() []string
+ GenerateRisks(*types.ParsedModel) []types.Risk
+}
+
+func GetBuiltInRiskRules() []RiskRule {
+ return []RiskRule{
+ builtin.NewAccidentalSecretLeakRule(),
+ builtin.NewCodeBackdooringRule(),
+ builtin.NewContainerBaseImageBackdooringRule(),
+ builtin.NewContainerPlatformEscapeRule(),
+ builtin.NewCrossSiteRequestForgeryRule(),
+ builtin.NewCrossSiteScriptingRule(),
+ builtin.NewDosRiskyAccessAcrossTrustBoundaryRule(),
+ builtin.NewIncompleteModelRule(),
+ builtin.NewLdapInjectionRule(),
+ builtin.NewMissingAuthenticationRule(),
+ builtin.NewMissingAuthenticationSecondFactorRule(builtin.NewMissingAuthenticationRule()),
+ builtin.NewMissingBuildInfrastructureRule(),
+ builtin.NewMissingCloudHardeningRule(),
+ builtin.NewMissingFileValidationRule(),
+ builtin.NewMissingHardeningRule(),
+ builtin.NewMissingIdentityPropagationRule(),
+ builtin.NewMissingIdentityProviderIsolationRule(),
+ builtin.NewMissingIdentityStoreRule(),
+ builtin.NewMissingNetworkSegmentationRule(),
+ builtin.NewMissingVaultRule(),
+ builtin.NewMissingVaultIsolationRule(),
+ builtin.NewMissingWafRule(),
+ builtin.NewMixedTargetsOnSharedRuntimeRule(),
+ builtin.NewPathTraversalRule(),
+ builtin.NewPushInsteadPullDeploymentRule(),
+ builtin.NewSearchQueryInjectionRule(),
+ builtin.NewServerSideRequestForgeryRule(),
+ builtin.NewServiceRegistryPoisoningRule(),
+ builtin.NewSqlNoSqlInjectionRule(),
+ builtin.NewUncheckedDeploymentRule(),
+ builtin.NewUnencryptedAssetRule(),
+ builtin.NewUnencryptedCommunicationRule(),
+ builtin.NewUnguardedAccessFromInternetRule(),
+ builtin.NewUnguardedDirectDatastoreAccessRule(),
+ builtin.NewUnnecessaryCommunicationLinkRule(),
+ builtin.NewUnnecessaryDataAssetRule(),
+ builtin.NewUnnecessaryDataTransferRule(),
+ builtin.NewUnnecessaryTechnicalAssetRule(),
+ builtin.NewUntrustedDeserializationRule(),
+ builtin.NewWrongCommunicationLinkContentRule(),
+ builtin.NewWrongTrustBoundaryContentRule(),
+ builtin.NewXmlExternalEntityRule(),
+ }
+}
diff --git a/pkg/security/types/authentication.go b/pkg/security/types/authentication.go
new file mode 100644
index 00000000..95db6aeb
--- /dev/null
+++ b/pkg/security/types/authentication.go
@@ -0,0 +1,111 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type Authentication int
+
+const (
+ NoneAuthentication Authentication = iota
+ Credentials
+ SessionId
+ Token
+ ClientCertificate
+ TwoFactor
+ Externalized
+)
+
+func AuthenticationValues() []TypeEnum {
+ return []TypeEnum{
+ NoneAuthentication,
+ Credentials,
+ SessionId,
+ Token,
+ ClientCertificate,
+ TwoFactor,
+ Externalized,
+ }
+}
+
+var AuthenticationTypeDescription = [...]TypeDescription{
+ {"none", "No authentication"},
+ {"credentials", "Username and password, pin or passphrase"},
+ {"session-id", "A server generated session id with limited life span"},
+ {"token", "A server generated token. Containing session id, other data and is cryptographically signed"},
+ {"client-certificate", "A certificate file stored on the client identifying this specific client"},
+ {"two-factor", "Credentials plus another factor like a physical object (card) or biometrics"},
+ {"externalized", "Some external company handles authentication"},
+}
+
+func ParseAuthentication(value string) (authentication Authentication, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range AuthenticationValues() {
+ if candidate.String() == value {
+ return candidate.(Authentication), err
+ }
+ }
+ return authentication, errors.New("Unable to parse into type: " + value)
+}
+
+func (what Authentication) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ //return [...]string{"none", "credentials", "session-id", "token", "client-certificate", "two-factor", "externalized"}[what]
+ return AuthenticationTypeDescription[what].Name
+}
+
+func (what Authentication) Explain() string {
+ return AuthenticationTypeDescription[what].Description
+}
+
+func (what Authentication) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+func (what *Authentication) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Authentication) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+func (what *Authentication) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Authentication) find(value string) (Authentication, error) {
+ for index, description := range AuthenticationTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Authentication(index), nil
+ }
+ }
+
+ return Authentication(0), fmt.Errorf("unknown authentication value %q", value)
+}
diff --git a/pkg/security/types/authentication_test.go b/pkg/security/types/authentication_test.go
new file mode 100644
index 00000000..d0849790
--- /dev/null
+++ b/pkg/security/types/authentication_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseAuthenticationTest struct {
+ input string
+ expected Authentication
+ expectedError error
+}
+
+func TestParseAuthentication(t *testing.T) {
+ testCases := map[string]ParseAuthenticationTest{
+ "none": {
+ input: "none",
+ expected: NoneAuthentication,
+ },
+ "credentials": {
+ input: "credentials",
+ expected: Credentials,
+ },
+ "session-id": {
+ input: "session-id",
+ expected: SessionId,
+ },
+ "token": {
+ input: "token",
+ expected: Token,
+ },
+ "client-certificate": {
+ input: "client-certificate",
+ expected: ClientCertificate,
+ },
+ "two-factor": {
+ input: "two-factor",
+ expected: TwoFactor,
+ },
+ "externalized": {
+ input: "externalized",
+ expected: Externalized,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseAuthentication(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/authorization.go b/pkg/security/types/authorization.go
new file mode 100644
index 00000000..eb6c0a87
--- /dev/null
+++ b/pkg/security/types/authorization.go
@@ -0,0 +1,98 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type Authorization int
+
+const (
+ NoneAuthorization Authorization = iota
+ TechnicalUser
+ EndUserIdentityPropagation
+)
+
+func AuthorizationValues() []TypeEnum {
+ return []TypeEnum{
+ NoneAuthorization,
+ TechnicalUser,
+ EndUserIdentityPropagation,
+ }
+}
+
+var AuthorizationTypeDescription = [...]TypeDescription{
+ {"none", "No authorization"},
+ {"technical-user", "Technical user (service-to-service) like DB user credentials"},
+ {"enduser-identity-propagation", "Identity of end user propagates to this service"},
+}
+
+func ParseAuthorization(value string) (authorization Authorization, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range AuthorizationValues() {
+ if candidate.String() == value {
+ return candidate.(Authorization), err
+ }
+ }
+ return authorization, errors.New("Unable to parse into type: " + value)
+}
+
+func (what Authorization) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return AuthorizationTypeDescription[what].Name
+}
+
+func (what Authorization) Explain() string {
+ return AuthorizationTypeDescription[what].Description
+}
+
+func (what Authorization) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+func (what *Authorization) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Authorization) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+func (what *Authorization) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Authorization) find(value string) (Authorization, error) {
+ for index, description := range AuthorizationTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Authorization(index), nil
+ }
+ }
+
+ return Authorization(0), fmt.Errorf("unknown authorization value %q", value)
+}
diff --git a/pkg/security/types/authorization_test.go b/pkg/security/types/authorization_test.go
new file mode 100644
index 00000000..c4d274b4
--- /dev/null
+++ b/pkg/security/types/authorization_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseAuthorizationTest struct {
+ input string
+ expected Authorization
+ expectedError error
+}
+
+func TestParseAuthorization(t *testing.T) {
+ testCases := map[string]ParseAuthorizationTest{
+ "none": {
+ input: "none",
+ expected: NoneAuthorization,
+ },
+ "technical-user": {
+ input: "technical-user",
+ expected: TechnicalUser,
+ },
+ "enduser-identity-propagation": {
+ input: "enduser-identity-propagation",
+ expected: EndUserIdentityPropagation,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseAuthorization(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/communication_link.go b/pkg/security/types/communication_link.go
new file mode 100644
index 00000000..cabf4255
--- /dev/null
+++ b/pkg/security/types/communication_link.go
@@ -0,0 +1,144 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "sort"
+)
+
+// CommunicationLink models a directed connection between two technical assets
+// (SourceId -> TargetId), including its protocol, security properties, and the
+// data assets flowing in each direction.
+type CommunicationLink struct {
+ Id string `json:"id,omitempty" yaml:"id,omitempty"`
+ SourceId string `json:"source_id,omitempty" yaml:"source_id,omitempty"`
+ TargetId string `json:"target_id,omitempty" yaml:"target_id,omitempty"`
+ Title string `json:"title,omitempty" yaml:"title,omitempty"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ Protocol Protocol `json:"protocol,omitempty" yaml:"protocol,omitempty"`
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ VPN bool `json:"vpn,omitempty" yaml:"vpn,omitempty"`
+ IpFiltered bool `json:"ip_filtered,omitempty" yaml:"ip_filtered,omitempty"`
+ Readonly bool `json:"readonly,omitempty" yaml:"readonly,omitempty"`
+ Authentication Authentication `json:"authentication,omitempty" yaml:"authentication,omitempty"`
+ Authorization Authorization `json:"authorization,omitempty" yaml:"authorization,omitempty"`
+ Usage Usage `json:"usage,omitempty" yaml:"usage,omitempty"`
+ DataAssetsSent []string `json:"data_assets_sent,omitempty" yaml:"data_assets_sent,omitempty"`
+ DataAssetsReceived []string `json:"data_assets_received,omitempty" yaml:"data_assets_received,omitempty"`
+ DiagramTweakWeight int `json:"diagram_tweak_weight,omitempty" yaml:"diagram_tweak_weight,omitempty"`
+ DiagramTweakConstraint bool `json:"diagram_tweak_constraint,omitempty" yaml:"diagram_tweak_constraint,omitempty"`
+}
+
+// IsTaggedWithAny reports whether the link carries at least one of the given
+// tags (case-insensitive).
+func (what CommunicationLink) IsTaggedWithAny(tags ...string) bool {
+ return containsCaseInsensitiveAny(what.Tags, tags...)
+}
+
+// IsTaggedWithBaseTag reports whether any of the link's tags equals baseTag
+// or starts with it (delegates to the package-level IsTaggedWithBaseTag).
+func (what CommunicationLink) IsTaggedWithBaseTag(baseTag string) bool {
+ return IsTaggedWithBaseTag(what.Tags, baseTag)
+}
+
+// IsAcrossTrustBoundary reports whether source and target assets live in
+// different directly-containing trust boundaries.
+func (what CommunicationLink) IsAcrossTrustBoundary(parsedModel *ParsedModel) bool {
+ trustBoundaryOfSourceAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId]
+ trustBoundaryOfTargetAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId]
+ return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id
+}
+
+// IsAcrossTrustBoundaryNetworkOnly is like IsAcrossTrustBoundary but only
+// considers network boundaries: for a non-network boundary it climbs to the
+// parent boundary first, and it additionally requires the target's boundary
+// to be a network boundary.
+func (what CommunicationLink) IsAcrossTrustBoundaryNetworkOnly(parsedModel *ParsedModel) bool {
+ trustBoundaryOfSourceAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.SourceId]
+ if !trustBoundaryOfSourceAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
+ trustBoundaryOfSourceAsset = parsedModel.TrustBoundaries[trustBoundaryOfSourceAsset.ParentTrustBoundaryID(parsedModel)]
+ }
+ trustBoundaryOfTargetAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.TargetId]
+ if !trustBoundaryOfTargetAsset.Type.IsNetworkBoundary() { // find and use the parent boundary then
+ trustBoundaryOfTargetAsset = parsedModel.TrustBoundaries[trustBoundaryOfTargetAsset.ParentTrustBoundaryID(parsedModel)]
+ }
+ return trustBoundaryOfSourceAsset.Id != trustBoundaryOfTargetAsset.Id && trustBoundaryOfTargetAsset.Type.IsNetworkBoundary()
+}
+
+// HighestConfidentiality returns the most confidential classification among
+// all data assets sent or received over this link (Public when none flow).
+func (what CommunicationLink) HighestConfidentiality(parsedModel *ParsedModel) Confidentiality {
+ highest := Public
+ for _, direction := range [][]string{what.DataAssetsSent, what.DataAssetsReceived} {
+  for _, dataId := range direction {
+   if dataAsset := parsedModel.DataAssets[dataId]; dataAsset.Confidentiality > highest {
+    highest = dataAsset.Confidentiality
+   }
+  }
+ }
+ return highest
+}
+
+// HighestIntegrity returns the highest integrity criticality among all data
+// assets sent or received over this link (Archive when none flow).
+func (what CommunicationLink) HighestIntegrity(parsedModel *ParsedModel) Criticality {
+ highest := Archive
+ for _, direction := range [][]string{what.DataAssetsSent, what.DataAssetsReceived} {
+  for _, dataId := range direction {
+   if dataAsset := parsedModel.DataAssets[dataId]; dataAsset.Integrity > highest {
+    highest = dataAsset.Integrity
+   }
+  }
+ }
+ return highest
+}
+
+// HighestAvailability returns the highest availability criticality among all
+// data assets sent or received over this link (Archive when none flow).
+func (what CommunicationLink) HighestAvailability(parsedModel *ParsedModel) Criticality {
+ highest := Archive
+ for _, direction := range [][]string{what.DataAssetsSent, what.DataAssetsReceived} {
+  for _, dataId := range direction {
+   if dataAsset := parsedModel.DataAssets[dataId]; dataAsset.Availability > highest {
+    highest = dataAsset.Availability
+   }
+  }
+ }
+ return highest
+}
+
+// DataAssetsSentSorted resolves the sent data-asset ids to DataAsset values,
+// sorted by title.
+func (what CommunicationLink) DataAssetsSentSorted(parsedModel *ParsedModel) []DataAsset {
+ result := make([]DataAsset, 0)
+ for _, assetID := range what.DataAssetsSent {
+ result = append(result, parsedModel.DataAssets[assetID])
+ }
+ sort.Sort(byDataAssetTitleSort(result))
+ return result
+}
+
+// DataAssetsReceivedSorted resolves the received data-asset ids to DataAsset
+// values, sorted by title.
+func (what CommunicationLink) DataAssetsReceivedSorted(parsedModel *ParsedModel) []DataAsset {
+ result := make([]DataAsset, 0)
+ for _, assetID := range what.DataAssetsReceived {
+ result = append(result, parsedModel.DataAssets[assetID])
+ }
+ sort.Sort(byDataAssetTitleSort(result))
+ return result
+}
+
+// IsBidirectional reports whether data flows in both directions over the link.
+func (what CommunicationLink) IsBidirectional() bool {
+ return len(what.DataAssetsSent) > 0 && len(what.DataAssetsReceived) > 0
+}
+
+// ByTechnicalCommunicationLinkIdSort sorts communication links by Id.
+// NOTE(review): Less uses ">", i.e. DESCENDING order — confirm this is intended.
+type ByTechnicalCommunicationLinkIdSort []CommunicationLink
+
+func (what ByTechnicalCommunicationLinkIdSort) Len() int { return len(what) }
+func (what ByTechnicalCommunicationLinkIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTechnicalCommunicationLinkIdSort) Less(i, j int) bool {
+ return what[i].Id > what[j].Id
+}
+
+// ByTechnicalCommunicationLinkTitleSort sorts communication links by Title.
+// NOTE(review): also descending (">"), unlike ByDataAssetTitleSort which sorts
+// ascending with "<" — confirm the asymmetry is intended.
+type ByTechnicalCommunicationLinkTitleSort []CommunicationLink
+
+func (what ByTechnicalCommunicationLinkTitleSort) Len() int { return len(what) }
+func (what ByTechnicalCommunicationLinkTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTechnicalCommunicationLinkTitleSort) Less(i, j int) bool {
+ return what[i].Title > what[j].Title
+}
diff --git a/pkg/security/types/confidentiality.go b/pkg/security/types/confidentiality.go
new file mode 100644
index 00000000..fad041ea
--- /dev/null
+++ b/pkg/security/types/confidentiality.go
@@ -0,0 +1,138 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// Confidentiality classifies how secret a data or technical asset is.
+// The ordinal values are ordered least to most confidential, so callers may
+// compare levels with < and > (see e.g. CommunicationLink.HighestConfidentiality).
+type Confidentiality int
+
+const (
+ Public Confidentiality = iota
+ Internal
+ Restricted
+ Confidential
+ StrictlyConfidential
+)
+
+// ConfidentialityValues lists all levels in ascending order of secrecy.
+func ConfidentialityValues() []TypeEnum {
+ return []TypeEnum{
+ Public,
+ Internal,
+ Restricted,
+ Confidential,
+ StrictlyConfidential,
+ }
+}
+
+// ParseConfidentiality converts a string to a Confidentiality level.
+// NOTE(review): matching here is exact (case-sensitive), while the find
+// method used by JSON/YAML unmarshalling is case-insensitive — confirm the
+// difference is intentional.
+func ParseConfidentiality(value string) (confidentiality Confidentiality, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range ConfidentialityValues() {
+ if candidate.String() == value {
+ return candidate.(Confidentiality), err
+ }
+ }
+ return confidentiality, errors.New("Unable to parse into type: " + value)
+}
+
+// ConfidentialityTypeDescription maps each enum ordinal to its serialized
+// name and human-readable description; array order must match the const block.
+var ConfidentialityTypeDescription = [...]TypeDescription{
+ {"public", "Public available information"},
+ {"internal", "(Company) internal information - but all people in the institution can access it"},
+ {"restricted", "Internal and with restricted access"},
+ {"confidential", "Only a few selected people have access"},
+ {"strictly-confidential", "Highest secrecy level"},
+}
+
+// String returns the serialized name of the level (e.g. "strictly-confidential").
+func (what Confidentiality) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return ConfidentialityTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the level.
+func (what Confidentiality) Explain() string {
+ return ConfidentialityTypeDescription[what].Description
+}
+
+// AttackerAttractivenessForAsset weights the level for risk scoring of the
+// asset itself.
+func (what Confidentiality) AttackerAttractivenessForAsset() float64 {
+ // fibonacci starting at 8
+ return [...]float64{8, 13, 21, 34, 55}[what]
+}
+
+// AttackerAttractivenessForProcessedOrStoredData weights the level for data
+// processed or stored by an asset.
+func (what Confidentiality) AttackerAttractivenessForProcessedOrStoredData() float64 {
+ // fibonacci starting at 5
+ return [...]float64{5, 8, 13, 21, 34}[what]
+}
+
+// AttackerAttractivenessForInOutTransferredData weights the level for data
+// transferred over communication links.
+func (what Confidentiality) AttackerAttractivenessForInOutTransferredData() float64 {
+ // fibonacci starting at 2
+ return [...]float64{2, 3, 5, 8, 13}[what]
+}
+
+// RatingStringInScale renders the level as "(rated N in scale of 5)", where N
+// is the 1-based ordinal (Public=1 ... StrictlyConfidential=5).
+func (what Confidentiality) RatingStringInScale() string {
+ // The enum is zero-based, so the 1-5 scale is simply ordinal+1. This replaces
+ // five sequential ifs and also yields a number for every value instead of
+ // silently emitting "(rated  in scale of 5)" for an out-of-range one.
+ return fmt.Sprintf("(rated %d in scale of 5)", int(what)+1)
+}
+
+// MarshalJSON serializes the enum as its quoted string name.
+func (what Confidentiality) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum via the
+// case-insensitive lookup in find.
+func (what *Confidentiality) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the enum as its string name.
+func (what Confidentiality) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum via find.
+func (what *Confidentiality) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find resolves a string (case-insensitively) to its Confidentiality value.
+func (what Confidentiality) find(value string) (Confidentiality, error) {
+ for index, description := range ConfidentialityTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Confidentiality(index), nil
+ }
+ }
+
+ return Confidentiality(0), fmt.Errorf("unknown confidentiality value %q", value)
+}
diff --git a/pkg/security/types/confidentiality_test.go b/pkg/security/types/confidentiality_test.go
new file mode 100644
index 00000000..2edacf6a
--- /dev/null
+++ b/pkg/security/types/confidentiality_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseConfidentialityTest is a table-driven test case for ParseConfidentiality:
+// a nil expectedError means the parse is expected to succeed.
+type ParseConfidentialityTest struct {
+ input string
+ expected Confidentiality
+ expectedError error
+}
+
+// TestParseConfidentiality checks each known confidentiality name plus one
+// unknown value, asserting both the parsed enum and the returned error.
+// (Fixes the misspelled test name "TestParseConfidenitality".)
+func TestParseConfidentiality(t *testing.T) {
+ testCases := map[string]ParseConfidentialityTest{
+ "public": {
+ input: "public",
+ expected: Public,
+ },
+ "internal": {
+ input: "internal",
+ expected: Internal,
+ },
+ "restricted": {
+ input: "restricted",
+ expected: Restricted,
+ },
+ "confidential": {
+ input: "confidential",
+ expected: Confidential,
+ },
+ "strictly-confidential": {
+ input: "strictly-confidential",
+ expected: StrictlyConfidential,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseConfidentiality(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/criticality.go b/pkg/security/types/criticality.go
new file mode 100644
index 00000000..a9e221b7
--- /dev/null
+++ b/pkg/security/types/criticality.go
@@ -0,0 +1,138 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// Criticality rates how important a data or technical asset is (used for both
+// integrity and availability). Ordinals are ordered least to most critical, so
+// levels may be compared with < and >.
+type Criticality int
+
+const (
+ Archive Criticality = iota
+ Operational
+ Important
+ Critical
+ MissionCritical
+)
+
+// CriticalityValues lists all levels in ascending order of criticality.
+func CriticalityValues() []TypeEnum {
+ return []TypeEnum{
+ Archive,
+ Operational,
+ Important,
+ Critical,
+ MissionCritical,
+ }
+}
+
+// ParseCriticality converts a string to a Criticality level.
+// NOTE(review): exact (case-sensitive) match, unlike the case-insensitive find
+// used by JSON/YAML unmarshalling — confirm the difference is intentional.
+func ParseCriticality(value string) (criticality Criticality, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range CriticalityValues() {
+ if candidate.String() == value {
+ return candidate.(Criticality), err
+ }
+ }
+ return criticality, errors.New("Unable to parse into type: " + value)
+}
+
+// CriticalityTypeDescription maps each enum ordinal to its serialized name and
+// description; array order must match the const block.
+var CriticalityTypeDescription = [...]TypeDescription{
+ {"archive", "Stored, not active"},
+ {"operational", "If this fails, people will just have an ad-hoc coffee break until it is back"},
+ {"important", "Issues here results in angry people"},
+ {"critical", "Failure is really expensive or crippling"},
+ {"mission-critical", "This must not fail"},
+}
+
+// String returns the serialized name of the level (e.g. "mission-critical").
+func (what Criticality) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return CriticalityTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the level.
+func (what Criticality) Explain() string {
+ return CriticalityTypeDescription[what].Description
+}
+
+// AttackerAttractivenessForAsset weights the level for risk scoring of the
+// asset itself.
+func (what Criticality) AttackerAttractivenessForAsset() float64 {
+ // fibonacci starting at 5
+ return [...]float64{5, 8, 13, 21, 34}[what]
+}
+
+// AttackerAttractivenessForProcessedOrStoredData weights the level for data
+// processed or stored by an asset.
+func (what Criticality) AttackerAttractivenessForProcessedOrStoredData() float64 {
+ // fibonacci starting at 3
+ return [...]float64{3, 5, 8, 13, 21}[what]
+}
+
+// AttackerAttractivenessForInOutTransferredData weights the level for data
+// transferred over communication links.
+func (what Criticality) AttackerAttractivenessForInOutTransferredData() float64 {
+ // fibonacci starting at 2
+ return [...]float64{2, 3, 5, 8, 13}[what]
+}
+
+// RatingStringInScale renders the level as "(rated N in scale of 5)", where N
+// is the 1-based ordinal (Archive=1 ... MissionCritical=5).
+func (what Criticality) RatingStringInScale() string {
+ // The enum is zero-based, so the 1-5 scale is simply ordinal+1. This replaces
+ // five sequential ifs and also yields a number for every value instead of
+ // silently emitting "(rated  in scale of 5)" for an out-of-range one.
+ return fmt.Sprintf("(rated %d in scale of 5)", int(what)+1)
+}
+
+// MarshalJSON serializes the enum as its quoted string name.
+func (what Criticality) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum via the
+// case-insensitive lookup in find.
+func (what *Criticality) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the enum as its string name.
+func (what Criticality) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum via find.
+func (what *Criticality) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find resolves a string (case-insensitively) to its Criticality value.
+func (what Criticality) find(value string) (Criticality, error) {
+ for index, description := range CriticalityTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Criticality(index), nil
+ }
+ }
+
+ return Criticality(0), fmt.Errorf("unknown criticality value %q", value)
+}
diff --git a/pkg/security/types/criticality_test.go b/pkg/security/types/criticality_test.go
new file mode 100644
index 00000000..55912084
--- /dev/null
+++ b/pkg/security/types/criticality_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseCriticalityTest is a table-driven test case for ParseCriticality:
+// a nil expectedError means the parse is expected to succeed.
+type ParseCriticalityTest struct {
+ input string
+ expected Criticality
+ expectedError error
+}
+
+// TestParseCriticality checks each known criticality name plus one unknown
+// value, asserting both the parsed enum and the returned error.
+func TestParseCriticality(t *testing.T) {
+ testCases := map[string]ParseCriticalityTest{
+ "archive": {
+ input: "archive",
+ expected: Archive,
+ },
+ "operational": {
+ input: "operational",
+ expected: Operational,
+ },
+ "important": {
+ input: "important",
+ expected: Important,
+ },
+ "critical": {
+ input: "critical",
+ expected: Critical,
+ },
+ "mission-critical": {
+ input: "mission-critical",
+ expected: MissionCritical,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseCriticality(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/data_asset.go b/pkg/security/types/data_asset.go
new file mode 100644
index 00000000..7fe1c46d
--- /dev/null
+++ b/pkg/security/types/data_asset.go
@@ -0,0 +1,248 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "sort"
+)
+
+// DataAsset models a piece of data in the threat model, with its CIA rating
+// (Confidentiality/Integrity/Availability) and ownership metadata.
+type DataAsset struct {
+ Id string `yaml:"id,omitempty" json:"id,omitempty"` // TODO: tag here still required?
+ Title string `yaml:"title,omitempty" json:"title,omitempty"` // TODO: tag here still required?
+ Description string `yaml:"description,omitempty" json:"description,omitempty"` // TODO: tag here still required?
+ Usage Usage `yaml:"usage,omitempty" json:"usage,omitempty"`
+ Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"`
+ Origin string `yaml:"origin,omitempty" json:"origin,omitempty"`
+ Owner string `yaml:"owner,omitempty" json:"owner,omitempty"`
+ Quantity Quantity `yaml:"quantity,omitempty" json:"quantity,omitempty"`
+ Confidentiality Confidentiality `yaml:"confidentiality,omitempty" json:"confidentiality,omitempty"`
+ Integrity Criticality `yaml:"integrity,omitempty" json:"integrity,omitempty"`
+ Availability Criticality `yaml:"availability,omitempty" json:"availability,omitempty"`
+ JustificationCiaRating string `yaml:"justification_cia_rating,omitempty" json:"justification_cia_rating,omitempty"`
+}
+
+// IsTaggedWithAny reports whether the asset carries at least one of the given
+// tags (case-insensitive).
+func (what DataAsset) IsTaggedWithAny(tags ...string) bool {
+ return containsCaseInsensitiveAny(what.Tags, tags...)
+}
+
+// IsTaggedWithBaseTag reports whether any of the asset's tags matches baseTag
+// (delegates to the package-level IsTaggedWithBaseTag).
+func (what DataAsset) IsTaggedWithBaseTag(baseTag string) bool {
+ return IsTaggedWithBaseTag(what.Tags, baseTag)
+}
+
+/*
+func (what DataAsset) IsAtRisk() bool {
+ for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
+ if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
+ return true
+ }
+ }
+ for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
+ if len(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks())) > 0 {
+ return true
+ }
+ }
+ return false
+}
+*/
+
+/*
+func (what DataAsset) IdentifiedRiskSeverityStillAtRisk() RiskSeverity {
+ highestRiskSeverity := Low
+ for _, techAsset := range what.ProcessedByTechnicalAssetsSorted() {
+ candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
+ if candidateSeverity > highestRiskSeverity {
+ highestRiskSeverity = candidateSeverity
+ }
+ }
+ for _, techAsset := range what.StoredByTechnicalAssetsSorted() {
+ candidateSeverity := HighestSeverityStillAtRisk(ReduceToOnlyStillAtRisk(techAsset.GeneratedRisks()))
+ if candidateSeverity > highestRiskSeverity {
+ highestRiskSeverity = candidateSeverity
+ }
+ }
+ return highestRiskSeverity
+}
+*/
+
+// IdentifiedRisksByResponsibleTechnicalAssetId groups all generated risks by
+// the id of each technical asset that processes or stores this data asset and
+// has at least one generated risk.
+func (what DataAsset) IdentifiedRisksByResponsibleTechnicalAssetId(model *ParsedModel) map[string][]Risk {
+ // Collect the responsible asset ids as a set; map[string]struct{} is the
+ // idiomatic zero-byte set (was map[string]interface{} holding true).
+ uniqueTechAssetIDsResponsibleForThisDataAsset := make(map[string]struct{})
+ for _, techAsset := range what.ProcessedByTechnicalAssetsSorted(model) {
+  if len(techAsset.GeneratedRisks(model)) > 0 {
+   uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = struct{}{}
+  }
+ }
+ for _, techAsset := range what.StoredByTechnicalAssetsSorted(model) {
+  if len(techAsset.GeneratedRisks(model)) > 0 {
+   uniqueTechAssetIDsResponsibleForThisDataAsset[techAsset.Id] = struct{}{}
+  }
+ }
+
+ // Pre-size the result map to the number of responsible assets.
+ result := make(map[string][]Risk, len(uniqueTechAssetIDsResponsibleForThisDataAsset))
+ for techAssetId := range uniqueTechAssetIDsResponsibleForThisDataAsset {
+  result[techAssetId] = append(result[techAssetId], model.TechnicalAssets[techAssetId].GeneratedRisks(model)...)
+ }
+ return result
+}
+
+// IsDataBreachPotentialStillAtRisk reports whether any still-at-risk risk
+// lists a data-breach technical asset that processes this data asset.
+func (what DataAsset) IsDataBreachPotentialStillAtRisk(parsedModel *ParsedModel) bool {
+ for _, risk := range FilteredByStillAtRisk(parsedModel) {
+ for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
+ if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IdentifiedDataBreachProbability returns the highest data-breach probability
+// over ALL risks (mitigated or not) whose data-breach technical assets process
+// this data asset; Improbable when none match.
+func (what DataAsset) IdentifiedDataBreachProbability(parsedModel *ParsedModel) DataBreachProbability {
+ highestProbability := Improbable
+ for _, risk := range AllRisks(parsedModel) {
+ for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
+ if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
+ if risk.DataBreachProbability > highestProbability {
+ highestProbability = risk.DataBreachProbability
+ break // this risk's probability is taken; move on to the next risk
+ }
+ }
+ }
+ }
+ return highestProbability
+}
+
+// IdentifiedDataBreachProbabilityStillAtRisk is like
+// IdentifiedDataBreachProbability but only considers risks still at risk.
+func (what DataAsset) IdentifiedDataBreachProbabilityStillAtRisk(parsedModel *ParsedModel) DataBreachProbability {
+ highestProbability := Improbable
+ for _, risk := range FilteredByStillAtRisk(parsedModel) {
+ for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
+ if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
+ if risk.DataBreachProbability > highestProbability {
+ highestProbability = risk.DataBreachProbability
+ break // this risk's probability is taken; move on to the next risk
+ }
+ }
+ }
+ }
+ return highestProbability
+}
+
+// IdentifiedDataBreachProbabilityRisksStillAtRisk returns every still-at-risk
+// risk that could breach this data asset (each risk appears at most once).
+func (what DataAsset) IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel *ParsedModel) []Risk {
+ result := make([]Risk, 0)
+ for _, risk := range FilteredByStillAtRisk(parsedModel) {
+ for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
+ if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
+ result = append(result, risk)
+ break // risk already collected; avoid duplicates
+ }
+ }
+ }
+ return result
+}
+
+// IdentifiedDataBreachProbabilityRisks returns every risk (mitigated or not)
+// that could breach this data asset (each risk appears at most once).
+func (what DataAsset) IdentifiedDataBreachProbabilityRisks(parsedModel *ParsedModel) []Risk {
+ result := make([]Risk, 0)
+ for _, risk := range AllRisks(parsedModel) {
+ for _, techAsset := range risk.DataBreachTechnicalAssetIDs {
+ if contains(parsedModel.TechnicalAssets[techAsset].DataAssetsProcessed, what.Id) {
+ result = append(result, risk)
+ break // risk already collected; avoid duplicates
+ }
+ }
+ }
+ return result
+}
+
+// ProcessedByTechnicalAssetsSorted returns all technical assets that list this
+// data asset among their processed data, sorted by asset title.
+func (what DataAsset) ProcessedByTechnicalAssetsSorted(parsedModel *ParsedModel) []TechnicalAsset {
+ result := make([]TechnicalAsset, 0)
+ for _, technicalAsset := range parsedModel.TechnicalAssets {
+ for _, candidateID := range technicalAsset.DataAssetsProcessed {
+ if candidateID == what.Id {
+ result = append(result, technicalAsset)
+ }
+ }
+ }
+ sort.Sort(ByTechnicalAssetTitleSort(result))
+ return result
+}
+
+// StoredByTechnicalAssetsSorted returns all technical assets that list this
+// data asset among their stored data, sorted by asset title.
+func (what DataAsset) StoredByTechnicalAssetsSorted(parsedModel *ParsedModel) []TechnicalAsset {
+ result := make([]TechnicalAsset, 0)
+ for _, technicalAsset := range parsedModel.TechnicalAssets {
+ for _, candidateID := range technicalAsset.DataAssetsStored {
+ if candidateID == what.Id {
+ result = append(result, technicalAsset)
+ }
+ }
+ }
+ sort.Sort(ByTechnicalAssetTitleSort(result))
+ return result
+}
+
+// SentViaCommLinksSorted returns all communication links that send this data
+// asset, sorted by link title.
+func (what DataAsset) SentViaCommLinksSorted(parsedModel *ParsedModel) []CommunicationLink {
+ result := make([]CommunicationLink, 0)
+ for _, technicalAsset := range parsedModel.TechnicalAssets {
+ for _, commLink := range technicalAsset.CommunicationLinks {
+ for _, candidateID := range commLink.DataAssetsSent {
+ if candidateID == what.Id {
+ result = append(result, commLink)
+ }
+ }
+ }
+ }
+ sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
+ return result
+}
+
+// ReceivedViaCommLinksSorted returns all communication links that receive this
+// data asset, sorted by link title.
+func (what DataAsset) ReceivedViaCommLinksSorted(parsedModel *ParsedModel) []CommunicationLink {
+ result := make([]CommunicationLink, 0)
+ for _, technicalAsset := range parsedModel.TechnicalAssets {
+ for _, commLink := range technicalAsset.CommunicationLinks {
+ for _, candidateID := range commLink.DataAssetsReceived {
+ if candidateID == what.Id {
+ result = append(result, commLink)
+ }
+ }
+ }
+ }
+ sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
+ return result
+}
+
+// SortByDataAssetDataBreachProbabilityAndTitle sorts in place: highest
+// data-breach probability first, ties broken by ascending title.
+// NOTE(review): the comparator recomputes IdentifiedDataBreachProbability on
+// every comparison, which scans all risks each time — consider precomputing if
+// this shows up in profiles.
+func SortByDataAssetDataBreachProbabilityAndTitle(parsedModel *ParsedModel, assets []DataAsset) {
+ sort.Slice(assets, func(i, j int) bool {
+ highestDataBreachProbabilityLeft := assets[i].IdentifiedDataBreachProbability(parsedModel)
+ highestDataBreachProbabilityRight := assets[j].IdentifiedDataBreachProbability(parsedModel)
+ if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight {
+ return assets[i].Title < assets[j].Title
+ }
+ return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight
+ })
+}
+
+// SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk sorts in place by
+// still-at-risk data-breach probability (highest first); on equal probability,
+// assets that still have open risks sort before those without, then by title.
+func SortByDataAssetDataBreachProbabilityAndTitleStillAtRisk(parsedModel *ParsedModel, assets []DataAsset) {
+ sort.Slice(assets, func(i, j int) bool {
+ risksLeft := assets[i].IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel)
+ risksRight := assets[j].IdentifiedDataBreachProbabilityRisksStillAtRisk(parsedModel)
+ highestDataBreachProbabilityLeft := assets[i].IdentifiedDataBreachProbabilityStillAtRisk(parsedModel)
+ highestDataBreachProbabilityRight := assets[j].IdentifiedDataBreachProbabilityStillAtRisk(parsedModel)
+ if highestDataBreachProbabilityLeft == highestDataBreachProbabilityRight {
+ if len(risksLeft) == 0 && len(risksRight) > 0 {
+ return false
+ }
+ if len(risksLeft) > 0 && len(risksRight) == 0 {
+ return true
+ }
+ return assets[i].Title < assets[j].Title
+ }
+ return highestDataBreachProbabilityLeft > highestDataBreachProbabilityRight
+ })
+}
+
+// ByDataAssetTitleSort sorts data assets by ascending title.
+type ByDataAssetTitleSort []DataAsset
+
+func (what ByDataAssetTitleSort) Len() int { return len(what) }
+func (what ByDataAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByDataAssetTitleSort) Less(i, j int) bool {
+ return what[i].Title < what[j].Title
+}
diff --git a/pkg/security/types/data_breach_probability.go b/pkg/security/types/data_breach_probability.go
new file mode 100644
index 00000000..1bd01a1f
--- /dev/null
+++ b/pkg/security/types/data_breach_probability.go
@@ -0,0 +1,106 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// DataBreachProbability rates how likely a risk leads to an actual data
+// breach. Ordinals are ordered least to most probable, so levels may be
+// compared with < and >.
+type DataBreachProbability int
+
+const (
+ Improbable DataBreachProbability = iota
+ Possible
+ Probable
+)
+
+// DataBreachProbabilityValues lists all levels in ascending order.
+func DataBreachProbabilityValues() []TypeEnum {
+ return []TypeEnum{
+ Improbable,
+ Possible,
+ Probable,
+ }
+}
+
+// DataBreachProbabilityTypeDescription maps each enum ordinal to its
+// serialized name and description; array order must match the const block.
+var DataBreachProbabilityTypeDescription = [...]TypeDescription{
+ {"improbable", "Improbable"},
+ {"possible", "Possible"},
+ {"probable", "Probable"},
+}
+
+// ParseDataBreachProbability converts a string to a DataBreachProbability.
+// An empty (or all-whitespace) value defaults to Possible.
+func ParseDataBreachProbability(value string) (dataBreachProbability DataBreachProbability, err error) {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return Possible, err
+ }
+
+ for _, candidate := range DataBreachProbabilityValues() {
+ if candidate.String() == value {
+ return candidate.(DataBreachProbability), err
+ }
+ }
+ return dataBreachProbability, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the serialized lowercase name of the level.
+func (what DataBreachProbability) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return DataBreachProbabilityTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the level.
+func (what DataBreachProbability) Explain() string {
+ return DataBreachProbabilityTypeDescription[what].Description
+}
+
+// Title returns the capitalized display name of the level.
+func (what DataBreachProbability) Title() string {
+ return [...]string{"Improbable", "Possible", "Probable"}[what]
+}
+
+// MarshalJSON serializes the enum as its quoted string name.
+func (what DataBreachProbability) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum via the
+// case-insensitive lookup in find.
+func (what *DataBreachProbability) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the enum as its string name.
+func (what DataBreachProbability) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum via find.
+func (what *DataBreachProbability) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find resolves a string (case-insensitively) to its DataBreachProbability.
+func (what DataBreachProbability) find(value string) (DataBreachProbability, error) {
+ for index, description := range DataBreachProbabilityTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return DataBreachProbability(index), nil
+ }
+ }
+
+ return DataBreachProbability(0), fmt.Errorf("unknown data breach probability value %q", value)
+}
diff --git a/pkg/security/types/data_breach_probability_test.go b/pkg/security/types/data_breach_probability_test.go
new file mode 100644
index 00000000..38feaaba
--- /dev/null
+++ b/pkg/security/types/data_breach_probability_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseDataBreachProbabilityTest is a table-driven test case for
+// ParseDataBreachProbability: a nil expectedError means success is expected.
+type ParseDataBreachProbabilityTest struct {
+ input string
+ expected DataBreachProbability
+ expectedError error
+}
+
+// TestParseDataBreachProbability checks each known name, the empty-string
+// default (Possible), and one unknown value.
+func TestParseDataBreachProbability(t *testing.T) {
+ testCases := map[string]ParseDataBreachProbabilityTest{
+ "improbable": {
+ input: "improbable",
+ expected: Improbable,
+ },
+ "possible": {
+ input: "possible",
+ expected: Possible,
+ },
+ "probable": {
+ input: "probable",
+ expected: Probable,
+ },
+ "default": {
+ input: "",
+ expected: Possible,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseDataBreachProbability(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/data_format.go b/pkg/security/types/data_format.go
new file mode 100644
index 00000000..52b1aaae
--- /dev/null
+++ b/pkg/security/types/data_format.go
@@ -0,0 +1,121 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// DataFormat enumerates the representation formats a technical asset accepts.
+type DataFormat int
+
+const (
+ JSON DataFormat = iota
+ XML
+ Serialization
+ File
+ CSV
+)
+
+// DataFormatValues lists all supported formats.
+func DataFormatValues() []TypeEnum {
+ return []TypeEnum{
+ JSON,
+ XML,
+ Serialization,
+ File,
+ CSV,
+ }
+}
+
+// DataFormatTypeDescription maps each enum ordinal to its serialized name and
+// description; array order must match the const block.
+var DataFormatTypeDescription = [...]TypeDescription{
+ {"json", "JSON"},
+ {"xml", "XML"},
+ {"serialization", "Serialized program objects"},
+ {"file", "Specific file types for data"},
+ {"csv", "CSV"},
+}
+
+// ParseDataFormat converts a string to a DataFormat.
+// NOTE(review): exact (case-sensitive) match, unlike the case-insensitive find
+// used by JSON/YAML unmarshalling — confirm the difference is intentional.
+func ParseDataFormat(value string) (dataFormat DataFormat, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range DataFormatValues() {
+ if candidate.String() == value {
+ return candidate.(DataFormat), err
+ }
+ }
+ return dataFormat, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the serialized lowercase name of the format.
+func (what DataFormat) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return DataFormatTypeDescription[what].Name
+}
+
+// Explain returns the short description from DataFormatTypeDescription.
+func (what DataFormat) Explain() string {
+ return DataFormatTypeDescription[what].Description
+}
+
+// Title returns the display name; array order must match the const block.
+func (what DataFormat) Title() string {
+ return [...]string{"JSON", "XML", "Serialization", "File", "CSV"}[what]
+}
+
+// Description returns a longer display text; array order must match the
+// const block.
+func (what DataFormat) Description() string {
+ return [...]string{"JSON marshalled object data", "XML structured data", "Serialization-based object graphs",
+ "File input/uploads", "CSV tabular data"}[what]
+}
+
+// MarshalJSON serializes the enum as its quoted string name.
+func (what DataFormat) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum via the
+// case-insensitive lookup in find.
+func (what *DataFormat) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the enum as its string name.
+func (what DataFormat) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum via find.
+func (what *DataFormat) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find resolves a string (case-insensitively) to its DataFormat value.
+func (what DataFormat) find(value string) (DataFormat, error) {
+ for index, description := range DataFormatTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return DataFormat(index), nil
+ }
+ }
+
+ return DataFormat(0), fmt.Errorf("unknown data format value %q", value)
+}
+
+// ByDataFormatAcceptedSort sorts formats by their ascending serialized name.
+type ByDataFormatAcceptedSort []DataFormat
+
+func (what ByDataFormatAcceptedSort) Len() int { return len(what) }
+func (what ByDataFormatAcceptedSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByDataFormatAcceptedSort) Less(i, j int) bool {
+ return what[i].String() < what[j].String()
+}
diff --git a/pkg/security/types/data_format_test.go b/pkg/security/types/data_format_test.go
new file mode 100644
index 00000000..df2ade98
--- /dev/null
+++ b/pkg/security/types/data_format_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// ParseDataFormatTest describes one table-driven case for TestParseDataFormat.
type ParseDataFormatTest struct {
	input         string        // raw value handed to ParseDataFormat
	expected      DataFormat    // value expected on success
	expectedError error         // exact error expected on failure (nil on success)
}
+
+func TestParseDataFormat(t *testing.T) {
+ testCases := map[string]ParseDataFormatTest{
+ "json": {
+ input: "json",
+ expected: JSON,
+ },
+ "xml": {
+ input: "xml",
+ expected: XML,
+ },
+ "serialization": {
+ input: "serialization",
+ expected: Serialization,
+ },
+ "file": {
+ input: "file",
+ expected: File,
+ },
+ "csv": {
+ input: "csv",
+ expected: CSV,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseDataFormat(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/date.go b/pkg/security/types/date.go
new file mode 100644
index 00000000..d800963f
--- /dev/null
+++ b/pkg/security/types/date.go
@@ -0,0 +1,40 @@
+package types
+
+import (
+ "gopkg.in/yaml.v3"
+ "time"
+)
+
// Date wraps time.Time so model dates (un)marshal in the plain
// "YYYY-MM-DD" format used throughout Threagile model files.
type Date struct {
	time.Time
}

// MarshalJSON encodes the date as a quoted "YYYY-MM-DD" JSON string.
func (what Date) MarshalJSON() ([]byte, error) {
	return []byte(what.Format(`"2006-01-02"`)), nil
}

// UnmarshalJSON parses a quoted "YYYY-MM-DD" JSON string into the date.
// A JSON null leaves the receiver unchanged, per the encoding/json
// convention that unmarshalers treat "null" as a no-op (the previous
// version returned a parse error for null).
func (what *Date) UnmarshalJSON(bytes []byte) error {
	if string(bytes) == "null" {
		return nil
	}

	date, parseError := time.Parse(`"2006-01-02"`, string(bytes))
	if parseError != nil {
		return parseError
	}

	what.Time = date

	return nil
}
+
+func (what Date) MarshalYAML() (interface{}, error) {
+ return what.Format(`2006-01-02`), nil
+}
+
+func (what *Date) UnmarshalYAML(node *yaml.Node) error {
+ date, parseError := time.Parse(`2006-01-02`, node.Value)
+ if parseError != nil {
+ return parseError
+ }
+
+ what.Time = date
+
+ return nil
+}
diff --git a/pkg/security/types/encryption_style.go b/pkg/security/types/encryption_style.go
new file mode 100644
index 00000000..32afa9f2
--- /dev/null
+++ b/pkg/security/types/encryption_style.go
@@ -0,0 +1,108 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
// EncryptionStyle expresses how data handled by an asset is encrypted, if at
// all. The declaration order must match EncryptionStyleTypeDescription,
// which is indexed by these values.
type EncryptionStyle int

const (
	NoneEncryption EncryptionStyle = iota // no encryption
	Transparent                           // encrypted data at rest
	DataWithSymmetricSharedKey            // same secret key on both sides
	DataWithAsymmetricSharedKey           // public/private key pair shared between partners
	DataWithEndUserIndividualKey          // key is (managed) by the end user
)

// EncryptionStyleValues returns all encryption styles in declaration order.
func EncryptionStyleValues() []TypeEnum {
	return []TypeEnum{
		NoneEncryption,
		Transparent,
		DataWithSymmetricSharedKey,
		DataWithAsymmetricSharedKey,
		DataWithEndUserIndividualKey,
	}
}
+
+func ParseEncryptionStyle(value string) (encryptionStyle EncryptionStyle, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range EncryptionStyleValues() {
+ if candidate.String() == value {
+ return candidate.(EncryptionStyle), err
+ }
+ }
+ return encryptionStyle, errors.New("Unable to parse into type: " + value)
+}
+
// EncryptionStyleTypeDescription maps each EncryptionStyle (by index) to its
// canonical name and explanation; order must match the iota declaration.
var EncryptionStyleTypeDescription = [...]TypeDescription{
	{"none", "No encryption"},
	{"transparent", "Encrypted data at rest"},
	{"data-with-symmetric-shared-key", "Both communication partners have the same key. This must be kept secret"},
	{"data-with-asymmetric-shared-key", "The key is split into public and private. Those two are shared between partners"},
	{"data-with-enduser-individual-key", "The key is (managed) by the end user"},
}
+
+func (what EncryptionStyle) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return EncryptionStyleTypeDescription[what].Name
+}
+
+func (what EncryptionStyle) Explain() string {
+ return EncryptionStyleTypeDescription[what].Description
+}
+
+func (what EncryptionStyle) Title() string {
+ return [...]string{"None", "Transparent", "Data with Symmetric Shared Key", "Data with Asymmetric Shared Key", "Data with End-User Individual Key"}[what]
+}
+
+func (what EncryptionStyle) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+func (what *EncryptionStyle) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what EncryptionStyle) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+func (what *EncryptionStyle) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what EncryptionStyle) find(value string) (EncryptionStyle, error) {
+ for index, description := range EncryptionStyleTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return EncryptionStyle(index), nil
+ }
+ }
+
+ return EncryptionStyle(0), fmt.Errorf("unknown encryption style value %q", value)
+}
diff --git a/pkg/security/types/encryption_style_test.go b/pkg/security/types/encryption_style_test.go
new file mode 100644
index 00000000..36bdf560
--- /dev/null
+++ b/pkg/security/types/encryption_style_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// ParseEncryptionStyleTest describes one table-driven case for
// TestParseEncryptionStyle.
type ParseEncryptionStyleTest struct {
	input         string          // raw value handed to ParseEncryptionStyle
	expected      EncryptionStyle // value expected on success
	expectedError error           // exact error expected on failure (nil on success)
}
+
+func TestParseEncryptionStyle(t *testing.T) {
+ testCases := map[string]ParseEncryptionStyleTest{
+ "none": {
+ input: "none",
+ expected: NoneEncryption,
+ },
+ "transparent": {
+ input: "transparent",
+ expected: Transparent,
+ },
+ "data-with-symmetric-shared-key": {
+ input: "data-with-symmetric-shared-key",
+ expected: DataWithSymmetricSharedKey,
+ },
+ "data-with-asymmetric-shared-key": {
+ input: "data-with-asymmetric-shared-key",
+ expected: DataWithAsymmetricSharedKey,
+ },
+ "data-with-enduser-individual-key": {
+ input: "data-with-enduser-individual-key",
+ expected: DataWithEndUserIndividualKey,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseEncryptionStyle(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/helpers.go b/pkg/security/types/helpers.go
new file mode 100644
index 00000000..0090195e
--- /dev/null
+++ b/pkg/security/types/helpers.go
@@ -0,0 +1,54 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "regexp"
+ "strings"
+)
+
// makeIDCleanupPattern matches every run of characters not allowed in a
// generated ID. Compiled once at package init instead of on every MakeID
// call (the previous version re-compiled per call and silently discarded
// the regexp.Compile error).
var makeIDCleanupPattern = regexp.MustCompile("[^A-Za-z0-9]+")

// MakeID normalizes an arbitrary string into an ID: lowercased, every run of
// non-alphanumeric characters collapsed to a single "-", and any leading or
// trailing "-" or " " trimmed.
func MakeID(val string) string {
	return strings.Trim(makeIDCleanupPattern.ReplaceAllString(strings.ToLower(val), "-"), "- ")
}
+
// contains reports whether x occurs in a (exact, case-sensitive match).
func contains(a []string, x string) bool {
	for i := range a {
		if a[i] == x {
			return true
		}
	}
	return false
}
+
// containsCaseInsensitiveAny reports whether any of the values x occurs in a,
// comparing case-insensitively and ignoring surrounding whitespace on both
// sides. The needles are normalized once up front instead of once per
// element of a (the previous version re-normalized them in the inner loop).
func containsCaseInsensitiveAny(a []string, x ...string) bool {
	needles := make([]string, len(x))
	for i, c := range x {
		needles[i] = strings.TrimSpace(strings.ToLower(c))
	}
	for _, n := range a {
		entry := strings.TrimSpace(strings.ToLower(n))
		for _, needle := range needles {
			if needle == entry {
				return true
			}
		}
	}
	return false
}
+
// IsTaggedWithBaseTag reports whether any tag matches the given base tag.
// Base tags are the part before the colon (in "aws:ec2" the base is "aws");
// a pure "aws" tag also matches the base tag "aws". Comparison is
// case-insensitive and ignores surrounding whitespace.
func IsTaggedWithBaseTag(tags []string, baseTag string) bool {
	normalizedBase := strings.ToLower(strings.TrimSpace(baseTag))
	subTagPrefix := normalizedBase + ":"
	for _, candidate := range tags {
		candidate = strings.ToLower(strings.TrimSpace(candidate))
		if candidate == normalizedBase || strings.HasPrefix(candidate, subTagPrefix) {
			return true
		}
	}
	return false
}
+
+type byDataAssetTitleSort []DataAsset
+
+func (what byDataAssetTitleSort) Len() int { return len(what) }
+func (what byDataAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what byDataAssetTitleSort) Less(i, j int) bool {
+ return what[i].Title < what[j].Title
+}
diff --git a/pkg/security/types/model.go b/pkg/security/types/model.go
new file mode 100644
index 00000000..97128f7a
--- /dev/null
+++ b/pkg/security/types/model.go
@@ -0,0 +1,427 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "github.com/threagile/threagile/pkg/input"
+ "regexp"
+ "slices"
+ "sort"
+ "strings"
+)
+
+// TODO: move model out of types package and
+// rename parsedModel to model or something like this to emphasize that it's just a model
+// maybe
// ParsedModel is the fully-resolved threat model: the declared model input
// (assets, boundaries, runtimes, risk categories, tracking, diagram tweaks)
// plus derived lookup structures and generated risks filled in by parsing.
type ParsedModel struct {
	ThreagileVersion string `yaml:"threagile_version,omitempty" json:"threagile_version,omitempty"`
	Includes []string `yaml:"includes,omitempty" json:"includes,omitempty"`
	Title string `json:"title,omitempty" yaml:"title,omitempty"`
	Author input.Author `json:"author,omitempty" yaml:"author,omitempty"`
	Contributors []input.Author `yaml:"contributors,omitempty" json:"contributors,omitempty"`
	Date Date `json:"date,omitempty" yaml:"date,omitempty"`
	AppDescription input.Overview `yaml:"application_description,omitempty" json:"application_description,omitempty"`
	BusinessOverview input.Overview `json:"business_overview,omitempty" yaml:"business_overview,omitempty"`
	TechnicalOverview input.Overview `json:"technical_overview,omitempty" yaml:"technical_overview,omitempty"`
	BusinessCriticality Criticality `json:"business_criticality,omitempty" yaml:"business_criticality,omitempty"`
	ManagementSummaryComment string `json:"management_summary_comment,omitempty" yaml:"management_summary_comment,omitempty"`
	SecurityRequirements map[string]string `json:"security_requirements,omitempty" yaml:"security_requirements,omitempty"`
	Questions map[string]string `json:"questions,omitempty" yaml:"questions,omitempty"`
	AbuseCases map[string]string `json:"abuse_cases,omitempty" yaml:"abuse_cases,omitempty"`
	TagsAvailable []string `json:"tags_available,omitempty" yaml:"tags_available,omitempty"`
	DataAssets map[string]DataAsset `json:"data_assets,omitempty" yaml:"data_assets,omitempty"`
	TechnicalAssets map[string]TechnicalAsset `json:"technical_assets,omitempty" yaml:"technical_assets,omitempty"`
	TrustBoundaries map[string]TrustBoundary `json:"trust_boundaries,omitempty" yaml:"trust_boundaries,omitempty"`
	SharedRuntimes map[string]SharedRuntime `json:"shared_runtimes,omitempty" yaml:"shared_runtimes,omitempty"`
	IndividualRiskCategories map[string]RiskCategory `json:"individual_risk_categories,omitempty" yaml:"individual_risk_categories,omitempty"`
	BuiltInRiskCategories map[string]RiskCategory `json:"built_in_risk_categories,omitempty" yaml:"built_in_risk_categories,omitempty"`
	RiskTracking map[string]RiskTracking `json:"risk_tracking,omitempty" yaml:"risk_tracking,omitempty"`
	CommunicationLinks map[string]CommunicationLink `json:"communication_links,omitempty" yaml:"communication_links,omitempty"`
	AllSupportedTags map[string]bool `json:"all_supported_tags,omitempty" yaml:"all_supported_tags,omitempty"`
	DiagramTweakNodesep int `json:"diagram_tweak_nodesep,omitempty" yaml:"diagram_tweak_nodesep,omitempty"`
	DiagramTweakRanksep int `json:"diagram_tweak_ranksep,omitempty" yaml:"diagram_tweak_ranksep,omitempty"`
	DiagramTweakEdgeLayout string `json:"diagram_tweak_edge_layout,omitempty" yaml:"diagram_tweak_edge_layout,omitempty"`
	DiagramTweakSuppressEdgeLabels bool `json:"diagram_tweak_suppress_edge_labels,omitempty" yaml:"diagram_tweak_suppress_edge_labels,omitempty"`
	DiagramTweakLayoutLeftToRight bool `json:"diagram_tweak_layout_left_to_right,omitempty" yaml:"diagram_tweak_layout_left_to_right,omitempty"`
	DiagramTweakInvisibleConnectionsBetweenAssets []string `json:"diagram_tweak_invisible_connections_between_assets,omitempty" yaml:"diagram_tweak_invisible_connections_between_assets,omitempty"`
	DiagramTweakSameRankAssets []string `json:"diagram_tweak_same_rank_assets,omitempty" yaml:"diagram_tweak_same_rank_assets,omitempty"`

	// TODO: those are generated based on items above and needs to be private
	IncomingTechnicalCommunicationLinksMappedByTargetId map[string][]CommunicationLink `json:"incoming_technical_communication_links_mapped_by_target_id,omitempty" yaml:"incoming_technical_communication_links_mapped_by_target_id,omitempty"`
	DirectContainingTrustBoundaryMappedByTechnicalAssetId map[string]TrustBoundary `json:"direct_containing_trust_boundary_mapped_by_technical_asset_id,omitempty" yaml:"direct_containing_trust_boundary_mapped_by_technical_asset_id,omitempty"`
	GeneratedRisksByCategory map[string][]Risk `json:"generated_risks_by_category,omitempty" yaml:"generated_risks_by_category,omitempty"`
	GeneratedRisksBySyntheticId map[string]Risk `json:"generated_risks_by_synthetic_id,omitempty" yaml:"generated_risks_by_synthetic_id,omitempty"`
}
+
+func (parsedModel *ParsedModel) AddToListOfSupportedTags(tags []string) {
+ for _, tag := range tags {
+ parsedModel.AllSupportedTags[tag] = true
+ }
+}
+
+func (parsedModel *ParsedModel) GetDeferredRiskTrackingDueToWildcardMatching() map[string]RiskTracking {
+ deferredRiskTrackingDueToWildcardMatching := make(map[string]RiskTracking)
+ for syntheticRiskId, riskTracking := range parsedModel.RiskTracking {
+ if strings.Contains(syntheticRiskId, "*") { // contains a wildcard char
+ deferredRiskTrackingDueToWildcardMatching[syntheticRiskId] = riskTracking
+ }
+ }
+
+ return deferredRiskTrackingDueToWildcardMatching
+}
+
+func (parsedModel *ParsedModel) HasNotYetAnyDirectNonWildcardRiskTracking(syntheticRiskId string) bool {
+ if _, ok := parsedModel.RiskTracking[syntheticRiskId]; ok {
+ return false
+ }
+ return true
+}
+
+func (parsedModel *ParsedModel) CheckTags(tags []string, where string) ([]string, error) {
+ var tagsUsed = make([]string, 0)
+ if tags != nil {
+ tagsUsed = make([]string, len(tags))
+ for i, parsedEntry := range tags {
+ referencedTag := fmt.Sprintf("%v", parsedEntry)
+ err := parsedModel.CheckTagExists(referencedTag, where)
+ if err != nil {
+ return nil, err
+ }
+ tagsUsed[i] = referencedTag
+ }
+ }
+ return tagsUsed, nil
+}
+
+func (parsedModel *ParsedModel) ApplyWildcardRiskTrackingEvaluation(ignoreOrphanedRiskTracking bool, progressReporter progressReporter) error {
+ progressReporter.Info("Executing risk tracking evaluation")
+ for syntheticRiskIdPattern, riskTracking := range parsedModel.GetDeferredRiskTrackingDueToWildcardMatching() {
+ progressReporter.Info("Applying wildcard risk tracking for risk id: " + syntheticRiskIdPattern)
+
+ foundSome := false
+ var matchingRiskIdExpression = regexp.MustCompile(strings.ReplaceAll(regexp.QuoteMeta(syntheticRiskIdPattern), `\*`, `[^@]+`))
+ for syntheticRiskId := range parsedModel.GeneratedRisksBySyntheticId {
+ if matchingRiskIdExpression.Match([]byte(syntheticRiskId)) && parsedModel.HasNotYetAnyDirectNonWildcardRiskTracking(syntheticRiskId) {
+ foundSome = true
+ parsedModel.RiskTracking[syntheticRiskId] = RiskTracking{
+ SyntheticRiskId: strings.TrimSpace(syntheticRiskId),
+ Justification: riskTracking.Justification,
+ CheckedBy: riskTracking.CheckedBy,
+ Ticket: riskTracking.Ticket,
+ Status: riskTracking.Status,
+ Date: riskTracking.Date,
+ }
+ }
+ }
+
+ if !foundSome {
+ if ignoreOrphanedRiskTracking {
+ progressReporter.Warn("WARNING: Wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern)
+ } else {
+ return errors.New("wildcard risk tracking does not match any risk id: " + syntheticRiskIdPattern)
+ }
+ }
+ }
+ return nil
+}
+
// CheckRiskTracking verifies that every risk-tracking entry references a
// generated risk id; unknown references are an error unless
// ignoreOrphanedRiskTracking is set. It then stamps each generated risk's
// RiskStatus from its tracking entry (defaulting to unchecked) so the status
// survives JSON marshalling.
func (parsedModel *ParsedModel) CheckRiskTracking(ignoreOrphanedRiskTracking bool, progressReporter progressReporter) error {
	progressReporter.Info("Checking risk tracking")
	for _, tracking := range parsedModel.RiskTracking {
		if _, ok := parsedModel.GeneratedRisksBySyntheticId[tracking.SyntheticRiskId]; !ok {
			if ignoreOrphanedRiskTracking {
				progressReporter.Info("Risk tracking references unknown risk (risk id not found): " + tracking.SyntheticRiskId)
			} else {
				return errors.New("Risk tracking references unknown risk (risk id not found) - you might want to use the option -ignore-orphaned-risk-tracking: " + tracking.SyntheticRiskId +
					"\n\nNOTE: For risk tracking each risk-id needs to be defined (the string with the @ sign in it). " +
					"These unique risk IDs are visible in the PDF report (the small grey string under each risk), " +
					"the Excel (column \"ID\"), as well as the JSON responses. Some risk IDs have only one @ sign in them, " +
					"while others multiple. The idea is to allow for unique but still speaking IDs. Therefore each risk instance " +
					"creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part. " +
					"Using wildcards (the * sign) for parts delimited by @ signs allows to handle groups of certain risks at once. " +
					"Best is to lookup the IDs to use in the created Excel file. Alternatively a model macro \"seed-risk-tracking\" " +
					"is available that helps in initially seeding the risk tracking part here based on already identified and not yet handled risks.")
			}
		}
	}

	// save also the risk-category-id and risk-status directly in the risk for better JSON marshalling
	for category := range parsedModel.GeneratedRisksByCategory {
		for i := range parsedModel.GeneratedRisksByCategory[category] {
			// context.parsedModel.GeneratedRisksByCategory[category][i].CategoryId = category
			parsedModel.GeneratedRisksByCategory[category][i].RiskStatus = parsedModel.GeneratedRisksByCategory[category][i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
		}
	}
	return nil
}
+
+func (parsedModel *ParsedModel) CheckTagExists(referencedTag, where string) error {
+ if !slices.Contains(parsedModel.TagsAvailable, referencedTag) {
+ return errors.New("missing referenced tag in overall tag list at " + where + ": " + referencedTag)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckDataAssetTargetExists(referencedAsset, where string) error {
+ if _, ok := parsedModel.DataAssets[referencedAsset]; !ok {
+ return errors.New("missing referenced data asset target at " + where + ": " + referencedAsset)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckTrustBoundaryExists(referencedId, where string) error {
+ if _, ok := parsedModel.TrustBoundaries[referencedId]; !ok {
+ return errors.New("missing referenced trust boundary at " + where + ": " + referencedId)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckSharedRuntimeExists(referencedId, where string) error {
+ if _, ok := parsedModel.SharedRuntimes[referencedId]; !ok {
+ return errors.New("missing referenced shared runtime at " + where + ": " + referencedId)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckCommunicationLinkExists(referencedId, where string) error {
+ if _, ok := parsedModel.CommunicationLinks[referencedId]; !ok {
+ return errors.New("missing referenced communication link at " + where + ": " + referencedId)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckTechnicalAssetExists(referencedAsset, where string, onlyForTweak bool) error {
+ if _, ok := parsedModel.TechnicalAssets[referencedAsset]; !ok {
+ suffix := ""
+ if onlyForTweak {
+ suffix = " (only referenced in diagram tweak)"
+ }
+ return errors.New("missing referenced technical asset target" + suffix + " at " + where + ": " + referencedAsset)
+ }
+ return nil
+}
+
+func (parsedModel *ParsedModel) CheckNestedTrustBoundariesExisting() error {
+ for _, trustBoundary := range parsedModel.TrustBoundaries {
+ for _, nestedId := range trustBoundary.TrustBoundariesNested {
+ if _, ok := parsedModel.TrustBoundaries[nestedId]; !ok {
+ return errors.New("missing referenced nested trust boundary: " + nestedId)
+ }
+ }
+ }
+ return nil
+}
+
+func CalculateSeverity(likelihood RiskExploitationLikelihood, impact RiskExploitationImpact) RiskSeverity {
+ result := likelihood.Weight() * impact.Weight()
+ if result <= 1 {
+ return LowSeverity
+ }
+ if result <= 3 {
+ return MediumSeverity
+ }
+ if result <= 8 {
+ return ElevatedSeverity
+ }
+ if result <= 12 {
+ return HighSeverity
+ }
+ return CriticalSeverity
+}
+
+func (parsedModel *ParsedModel) InScopeTechnicalAssets() []TechnicalAsset {
+ result := make([]TechnicalAsset, 0)
+ for _, asset := range parsedModel.TechnicalAssets {
+ if !asset.OutOfScope {
+ result = append(result, asset)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) SortedTechnicalAssetIDs() []string {
+ res := make([]string, 0)
+ for id := range parsedModel.TechnicalAssets {
+ res = append(res, id)
+ }
+ sort.Strings(res)
+ return res
+}
+
+func (parsedModel *ParsedModel) TagsActuallyUsed() []string {
+ result := make([]string, 0)
+ for _, tag := range parsedModel.TagsAvailable {
+ if len(parsedModel.TechnicalAssetsTaggedWithAny(tag)) > 0 ||
+ len(parsedModel.CommunicationLinksTaggedWithAny(tag)) > 0 ||
+ len(parsedModel.DataAssetsTaggedWithAny(tag)) > 0 ||
+ len(parsedModel.TrustBoundariesTaggedWithAny(tag)) > 0 ||
+ len(parsedModel.SharedRuntimesTaggedWithAny(tag)) > 0 {
+ result = append(result, tag)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) TechnicalAssetsTaggedWithAny(tags ...string) []TechnicalAsset {
+ result := make([]TechnicalAsset, 0)
+ for _, candidate := range parsedModel.TechnicalAssets {
+ if candidate.IsTaggedWithAny(tags...) {
+ result = append(result, candidate)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) CommunicationLinksTaggedWithAny(tags ...string) []CommunicationLink {
+ result := make([]CommunicationLink, 0)
+ for _, asset := range parsedModel.TechnicalAssets {
+ for _, candidate := range asset.CommunicationLinks {
+ if candidate.IsTaggedWithAny(tags...) {
+ result = append(result, candidate)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) DataAssetsTaggedWithAny(tags ...string) []DataAsset {
+ result := make([]DataAsset, 0)
+ for _, candidate := range parsedModel.DataAssets {
+ if candidate.IsTaggedWithAny(tags...) {
+ result = append(result, candidate)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) TrustBoundariesTaggedWithAny(tags ...string) []TrustBoundary {
+ result := make([]TrustBoundary, 0)
+ for _, candidate := range parsedModel.TrustBoundaries {
+ if candidate.IsTaggedWithAny(tags...) {
+ result = append(result, candidate)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) SharedRuntimesTaggedWithAny(tags ...string) []SharedRuntime {
+ result := make([]SharedRuntime, 0)
+ for _, candidate := range parsedModel.SharedRuntimes {
+ if candidate.IsTaggedWithAny(tags...) {
+ result = append(result, candidate)
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) OutOfScopeTechnicalAssets() []TechnicalAsset {
+ assets := make([]TechnicalAsset, 0)
+ for _, asset := range parsedModel.TechnicalAssets {
+ if asset.OutOfScope {
+ assets = append(assets, asset)
+ }
+ }
+ sort.Sort(ByTechnicalAssetTitleSort(assets))
+ return assets
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlySTRIDEInformationDisclosure(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == InformationDisclosure {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlySTRIDEDenialOfService(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == DenialOfService {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlySTRIDEElevationOfPrivilege(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == ElevationOfPrivilege {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlyBusinessSide(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == BusinessSide {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlyArchitecture(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Architecture {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlyDevelopment(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Development {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func (parsedModel *ParsedModel) RisksOfOnlyOperation(risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, categoryRisks := range risksByCategory {
+ for _, risk := range categoryRisks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Operations {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
// progressReporter is the minimal logging surface needed by the risk
// tracking/checking methods of ParsedModel; callers supply an implementation.
type progressReporter interface {
	Info(a ...any)
	Warn(a ...any)
	Error(a ...any)
}
diff --git a/pkg/security/types/protocol.go b/pkg/security/types/protocol.go
new file mode 100644
index 00000000..532d26bc
--- /dev/null
+++ b/pkg/security/types/protocol.go
@@ -0,0 +1,255 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
// Protocol enumerates the communication protocols a communication link can
// use. The declaration order must match ProtocolTypeDescription, which is
// indexed by these values.
type Protocol int

const (
	UnknownProtocol Protocol = iota
	HTTP
	HTTPS
	WS
	WSS
	ReverseProxyWebProtocol
	ReverseProxyWebProtocolEncrypted
	MQTT
	JDBC
	JdbcEncrypted
	ODBC
	OdbcEncrypted
	SqlAccessProtocol
	SqlAccessProtocolEncrypted
	NosqlAccessProtocol
	NosqlAccessProtocolEncrypted
	BINARY
	BinaryEncrypted
	TEXT
	TextEncrypted
	SSH
	SshTunnel
	SMTP
	SmtpEncrypted
	POP3
	Pop3Encrypted
	IMAP
	ImapEncrypted
	FTP
	FTPS
	SFTP
	SCP
	LDAP
	LDAPS
	JMS
	NFS
	SMB
	SmbEncrypted
	LocalFileAccess
	NRPE
	XMPP
	IIOP
	IiopEncrypted
	JRMP
	JrmpEncrypted
	InProcessLibraryCall
	ContainerSpawning
)
+
// ProtocolValues returns all protocols in declaration order; used e.g. by
// ParseProtocol to resolve canonical names.
func ProtocolValues() []TypeEnum {
	return []TypeEnum{
		UnknownProtocol,
		HTTP,
		HTTPS,
		WS,
		WSS,
		ReverseProxyWebProtocol,
		ReverseProxyWebProtocolEncrypted,
		MQTT,
		JDBC,
		JdbcEncrypted,
		ODBC,
		OdbcEncrypted,
		SqlAccessProtocol,
		SqlAccessProtocolEncrypted,
		NosqlAccessProtocol,
		NosqlAccessProtocolEncrypted,
		BINARY,
		BinaryEncrypted,
		TEXT,
		TextEncrypted,
		SSH,
		SshTunnel,
		SMTP,
		SmtpEncrypted,
		POP3,
		Pop3Encrypted,
		IMAP,
		ImapEncrypted,
		FTP,
		FTPS,
		SFTP,
		SCP,
		LDAP,
		LDAPS,
		JMS,
		NFS,
		SMB,
		SmbEncrypted,
		LocalFileAccess,
		NRPE,
		XMPP,
		IIOP,
		IiopEncrypted,
		JRMP,
		JrmpEncrypted,
		InProcessLibraryCall,
		ContainerSpawning,
	}
}
+
+var ProtocolTypeDescription = [...]TypeDescription{
+ {"unknown-protocol", "Unknown protocol"},
+ {"http", "HTTP protocol"},
+ {"https", "HTTPS protocol (encrypted)"},
+ {"ws", "WebSocket"},
+ {"wss", "WebSocket but encrypted"},
+ {"reverse-proxy-web-protocol", "Protocols used by reverse proxies"},
+ {"reverse-proxy-web-protocol-encrypted", "Protocols used by reverse proxies but encrypted"},
+ {"mqtt", "MQTT Message protocol. Encryption via TLS is optional"},
+ {"jdbc", "Java Database Connectivity"},
+ {"jdbc-encrypted", "Java Database Connectivity but encrypted"},
+ {"odbc", "Open Database Connectivity"},
+ {"odbc-encrypted", "Open Database Connectivity but encrypted"},
+ {"sql-access-protocol", "SQL access protocol"},
+ {"sql-access-protocol-encrypted", "SQL access protocol but encrypted"},
+ {"nosql-access-protocol", "NOSQL access protocol"},
+ {"nosql-access-protocol-encrypted", "NOSQL access protocol but encrypted"},
+ {"binary", "Some other binary protocol"},
+ {"binary-encrypted", "Some other binary protocol, encrypted"},
+ {"text", "Some other text protocol"},
+ {"text-encrypted", "Some other text protocol, encrypted"},
+ {"ssh", "Secure Shell to execute commands"},
+ {"ssh-tunnel", "Secure Shell as a tunnel"},
+ {"smtp", "Mail transfer protocol (sending)"},
+ {"smtp-encrypted", "Mail transfer protocol (sending), encrypted"},
+ {"pop3", "POP 3 mail fetching"},
+ {"pop3-encrypted", "POP 3 mail fetching, encrypted"},
+ {"imap", "IMAP mail sync protocol"},
+ {"imap-encrypted", "IMAP mail sync protocol, encrypted"},
+ {"ftp", "File Transfer Protocol"},
+ {"ftps", "FTP with TLS"},
+ {"sftp", "FTP on SSH"},
+ {"scp", "Secure Shell to copy files"},
+ {"ldap", "Lightweight Directory Access Protocol - User directories"},
+ {"ldaps", "Lightweight Directory Access Protocol - User directories on TLS"},
+ {"jms", "Jakarta Messaging"},
+ {"nfs", "Network File System"},
+ {"smb", "Server Message Block"},
+ {"smb-encrypted", "Server Message Block, but encrypted"},
+ {"local-file-access", "Data files are on the local system"},
+ {"nrpe", "Nagios Remote Plugin Executor"},
+ {"xmpp", "Extensible Messaging and Presence Protocol"},
+ {"iiop", "Internet Inter-ORB Protocol "},
+ {"iiop-encrypted", "Internet Inter-ORB Protocol , encrypted"},
+ {"jrmp", "Java Remote Method Protocol"},
+ {"jrmp-encrypted", "Java Remote Method Protocol, encrypted"},
+ {"in-process-library-call", "Call to local library"},
+ {"container-spawning", "Spawn a container"},
+}
+
+func ParseProtocol(value string) (protocol Protocol, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range ProtocolValues() {
+ if candidate.String() == value {
+ return candidate.(Protocol), err
+ }
+ }
+ return protocol, errors.New("Unable to parse into type: " + value)
+}
+
+func (what Protocol) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return ProtocolTypeDescription[what].Name
+}
+
+func (what Protocol) Explain() string {
+ return ProtocolTypeDescription[what].Description
+}
+
+func (what Protocol) IsProcessLocal() bool {
+ return what == InProcessLibraryCall || what == LocalFileAccess || what == ContainerSpawning
+}
+
+func (what Protocol) IsEncrypted() bool {
+ return what == HTTPS || what == WSS || what == JdbcEncrypted || what == OdbcEncrypted ||
+ what == NosqlAccessProtocolEncrypted || what == SqlAccessProtocolEncrypted || what == BinaryEncrypted || what == TextEncrypted || what == SSH || what == SshTunnel ||
+ what == FTPS || what == SFTP || what == SCP || what == LDAPS || what == ReverseProxyWebProtocolEncrypted ||
+ what == IiopEncrypted || what == JrmpEncrypted || what == SmbEncrypted || what == SmtpEncrypted || what == Pop3Encrypted || what == ImapEncrypted
+}
+
+func (what Protocol) IsPotentialDatabaseAccessProtocol(includingLaxDatabaseProtocols bool) bool {
+ strictlyDatabaseOnlyProtocol := what == JdbcEncrypted || what == OdbcEncrypted ||
+ what == NosqlAccessProtocolEncrypted || what == SqlAccessProtocolEncrypted || what == JDBC || what == ODBC || what == NosqlAccessProtocol || what == SqlAccessProtocol
+ if includingLaxDatabaseProtocols {
+ // include HTTP for REST-based NoSQL-DBs as well as unknown binary
+ return strictlyDatabaseOnlyProtocol || what == HTTPS || what == HTTP || what == BINARY || what == BinaryEncrypted
+ }
+ return strictlyDatabaseOnlyProtocol
+}
+
+func (what Protocol) IsPotentialWebAccessProtocol() bool {
+ return what == HTTP || what == HTTPS || what == WS || what == WSS || what == ReverseProxyWebProtocol || what == ReverseProxyWebProtocolEncrypted
+}
+
+func (what Protocol) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+func (what *Protocol) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Protocol) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+func (what *Protocol) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+func (what Protocol) find(value string) (Protocol, error) {
+ for index, description := range ProtocolTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Protocol(index), nil
+ }
+ }
+
+ return Protocol(0), fmt.Errorf("unknown protocol value %q", value)
+}
diff --git a/pkg/security/types/protocol_test.go b/pkg/security/types/protocol_test.go
new file mode 100644
index 00000000..cd9cefd4
--- /dev/null
+++ b/pkg/security/types/protocol_test.go
@@ -0,0 +1,224 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseProtocolTest struct {
+ input string
+ expected Protocol
+ expectedError error
+}
+
+func TestParseProtocol(t *testing.T) {
+ testCases := map[string]ParseProtocolTest{
+ "unknown-protocol": {
+ input: "unknown-protocol",
+ expected: UnknownProtocol,
+ },
+ "http": {
+ input: "http",
+ expected: HTTP,
+ },
+ "https": {
+ input: "https",
+ expected: HTTPS,
+ },
+ "ws": {
+ input: "ws",
+ expected: WS,
+ },
+ "wss": {
+ input: "wss",
+ expected: WSS,
+ },
+ "reverse-proxy-web-protocol": {
+ input: "reverse-proxy-web-protocol",
+ expected: ReverseProxyWebProtocol,
+ },
+ "reverse-proxy-web-protocol-encrypted": {
+ input: "reverse-proxy-web-protocol-encrypted",
+ expected: ReverseProxyWebProtocolEncrypted,
+ },
+ "mqtt": {
+ input: "mqtt",
+ expected: MQTT,
+ },
+ "jdbc": {
+ input: "jdbc",
+ expected: JDBC,
+ },
+ "jdbc-encrypted": {
+ input: "jdbc-encrypted",
+ expected: JdbcEncrypted,
+ },
+ "odbc": {
+ input: "odbc",
+ expected: ODBC,
+ },
+ "odbc-encrypted": {
+ input: "odbc-encrypted",
+ expected: OdbcEncrypted,
+ },
+ "sql-access-protocol": {
+ input: "sql-access-protocol",
+ expected: SqlAccessProtocol,
+ },
+ "sql-access-protocol-encrypted": {
+ input: "sql-access-protocol-encrypted",
+ expected: SqlAccessProtocolEncrypted,
+ },
+ "nosql-access-protocol": {
+ input: "nosql-access-protocol",
+ expected: NosqlAccessProtocol,
+ },
+ "nosql-access-protocol-encrypted": {
+ input: "nosql-access-protocol-encrypted",
+ expected: NosqlAccessProtocolEncrypted,
+ },
+ "binary": {
+ input: "binary",
+ expected: BINARY,
+ },
+ "binary-encrypted": {
+ input: "binary-encrypted",
+ expected: BinaryEncrypted,
+ },
+ "text": {
+ input: "text",
+ expected: TEXT,
+ },
+ "text-encrypted": {
+ input: "text-encrypted",
+ expected: TextEncrypted,
+ },
+ "ssh": {
+ input: "ssh",
+ expected: SSH,
+ },
+ "ssh-tunnel": {
+ input: "ssh-tunnel",
+ expected: SshTunnel,
+ },
+ "smtp": {
+ input: "smtp",
+ expected: SMTP,
+ },
+ "smtp-encrypted": {
+ input: "smtp-encrypted",
+ expected: SmtpEncrypted,
+ },
+ "pop3": {
+ input: "pop3",
+ expected: POP3,
+ },
+ "pop3-encrypted": {
+ input: "pop3-encrypted",
+ expected: Pop3Encrypted,
+ },
+ "imap": {
+ input: "imap",
+ expected: IMAP,
+ },
+ "imap-encrypted": {
+ input: "imap-encrypted",
+ expected: ImapEncrypted,
+ },
+ "ftp": {
+ input: "ftp",
+ expected: FTP,
+ },
+ "ftps": {
+ input: "ftps",
+ expected: FTPS,
+ },
+ "sftp": {
+ input: "sftp",
+ expected: SFTP,
+ },
+ "scp": {
+ input: "scp",
+ expected: SCP,
+ },
+ "ldap": {
+ input: "ldap",
+ expected: LDAP,
+ },
+ "ldaps": {
+ input: "ldaps",
+ expected: LDAPS,
+ },
+ "jms": {
+ input: "jms",
+ expected: JMS,
+ },
+ "nfs": {
+ input: "nfs",
+ expected: NFS,
+ },
+ "smb": {
+ input: "smb",
+ expected: SMB,
+ },
+ "smb-encrypted": {
+ input: "smb-encrypted",
+ expected: SmbEncrypted,
+ },
+ "local-file-access": {
+ input: "local-file-access",
+ expected: LocalFileAccess,
+ },
+ "nrpe": {
+ input: "nrpe",
+ expected: NRPE,
+ },
+ "xmpp": {
+ input: "xmpp",
+ expected: XMPP,
+ },
+ "iiop": {
+ input: "iiop",
+ expected: IIOP,
+ },
+ "iiop-encrypted": {
+ input: "iiop-encrypted",
+ expected: IiopEncrypted,
+ },
+ "jrmp": {
+ input: "jrmp",
+ expected: JRMP,
+ },
+ "jrmp-encrypted": {
+ input: "jrmp-encrypted",
+ expected: JrmpEncrypted,
+ },
+ "in-process-library-call": {
+ input: "in-process-library-call",
+ expected: InProcessLibraryCall,
+ },
+ "container-spawning": {
+ input: "container-spawning",
+ expected: ContainerSpawning,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseProtocol(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/quantity.go b/pkg/security/types/quantity.go
new file mode 100644
index 00000000..f4942267
--- /dev/null
+++ b/pkg/security/types/quantity.go
@@ -0,0 +1,110 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type Quantity int
+
+const (
+ VeryFew Quantity = iota
+ Few
+ Many
+ VeryMany
+)
+
+func QuantityValues() []TypeEnum {
+ return []TypeEnum{
+ VeryFew,
+ Few,
+ Many,
+ VeryMany,
+ }
+}
+
+// ParseQuantity converts a name (e.g. "very-few") into a Quantity.
+// The comparison is exact (case-sensitive) after trimming surrounding
+// whitespace; unknown names return the zero value (VeryFew) and an error.
+func ParseQuantity(value string) (quantity Quantity, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range QuantityValues() {
+ if candidate.String() == value {
+ return candidate.(Quantity), err
+ }
+ }
+ return quantity, errors.New("Unable to parse into type: " + value)
+}
+
+var QuantityTypeDescription = [...]TypeDescription{
+ {"very-few", "Very few"},
+ {"few", "Few"},
+ {"many", "Many"},
+ {"very-many", "Very many"},
+}
+
+// String returns the machine-readable name of the quantity level
+// (e.g. "very-few"), as used in model YAML files.
+func (what Quantity) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return QuantityTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the quantity level.
+func (what Quantity) Explain() string {
+ return QuantityTypeDescription[what].Description
+}
+
+// Title returns a display title for the quantity level (e.g. "very few").
+func (what Quantity) Title() string {
+ return [...]string{"very few", "few", "many", "very many"}[what]
+}
+
+// QuantityFactor returns the numeric weight for this quantity level,
+// increasing along the Fibonacci sequence (1, 2, 3, 5).
+func (what Quantity) QuantityFactor() float64 {
+ // fibonacci starting at 1
+ return [...]float64{1, 2, 3, 5}[what]
+}
+
+// MarshalJSON serializes the quantity as its name string (see String).
+func (what Quantity) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a Quantity via find
+// (case-insensitive name lookup).
+func (what *Quantity) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the quantity as its name string (see String).
+func (what Quantity) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a Quantity via find
+// (case-insensitive name lookup).
+func (what *Quantity) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its Quantity value by case-insensitive comparison
+// against QuantityTypeDescription. Unknown names yield Quantity(0) and an
+// error.
+func (what Quantity) find(value string) (Quantity, error) {
+ for index, description := range QuantityTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return Quantity(index), nil
+ }
+ }
+
+ return Quantity(0), fmt.Errorf("unknown quantity value %q", value)
+}
diff --git a/pkg/security/types/quantity_test.go b/pkg/security/types/quantity_test.go
new file mode 100644
index 00000000..1ddfb52c
--- /dev/null
+++ b/pkg/security/types/quantity_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseQuantityTest struct {
+ input string
+ expected Quantity
+ expectedError error
+}
+
+func TestParseQuantity(t *testing.T) {
+ testCases := map[string]ParseQuantityTest{
+ "very-few": {
+ input: "very-few",
+ expected: VeryFew,
+ },
+ "few": {
+ input: "few",
+ expected: Few,
+ },
+ "many": {
+ input: "many",
+ expected: Many,
+ },
+ "very-many": {
+ input: "very-many",
+ expected: VeryMany,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseQuantity(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risk-category.go b/pkg/security/types/risk-category.go
new file mode 100644
index 00000000..58a17b2c
--- /dev/null
+++ b/pkg/security/types/risk-category.go
@@ -0,0 +1,21 @@
+package types
+
+// RiskCategory describes one category of risk: identifying texts (title,
+// description, impact), external references (ASVS, cheat sheet, CWE),
+// guidance (action, mitigation, check), detection logic and assessment
+// text, plus its classification by responsible function and STRIDE.
+type RiskCategory struct {
+ // TODO: refactor all "Id" here and elsewhere to "ID"
+ Id string `json:"id,omitempty" yaml:"id,omitempty"`
+ Title string `json:"title,omitempty" yaml:"title,omitempty"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ Impact string `json:"impact,omitempty" yaml:"impact,omitempty"`
+ ASVS string `json:"asvs,omitempty" yaml:"asvs,omitempty"`
+ CheatSheet string `json:"cheat_sheet,omitempty" yaml:"cheat_sheet,omitempty"`
+ Action string `json:"action,omitempty" yaml:"action,omitempty"`
+ Mitigation string `json:"mitigation,omitempty" yaml:"mitigation,omitempty"`
+ Check string `json:"check,omitempty" yaml:"check,omitempty"`
+ DetectionLogic string `json:"detection_logic,omitempty" yaml:"detection_logic,omitempty"`
+ RiskAssessment string `json:"risk_assessment,omitempty" yaml:"risk_assessment,omitempty"`
+ FalsePositives string `json:"false_positives,omitempty" yaml:"false_positives,omitempty"`
+ Function RiskFunction `json:"function,omitempty" yaml:"function,omitempty"`
+ STRIDE STRIDE `json:"stride,omitempty" yaml:"stride,omitempty"`
+ ModelFailurePossibleReason bool `json:"model_failure_possible_reason,omitempty" yaml:"model_failure_possible_reason,omitempty"`
+ CWE int `json:"cwe,omitempty" yaml:"cwe,omitempty"`
+}
diff --git a/pkg/security/types/risk-tracking.go b/pkg/security/types/risk-tracking.go
new file mode 100644
index 00000000..fc464226
--- /dev/null
+++ b/pkg/security/types/risk-tracking.go
@@ -0,0 +1,10 @@
+package types
+
+// RiskTracking records the review state of a single risk, identified by
+// the risk's synthetic ID: justification, ticket reference, reviewer,
+// tracking status, and date.
+type RiskTracking struct {
+ SyntheticRiskId string `json:"synthetic_risk_id,omitempty" yaml:"synthetic_risk_id,omitempty"`
+ Justification string `json:"justification,omitempty" yaml:"justification,omitempty"`
+ Ticket string `json:"ticket,omitempty" yaml:"ticket,omitempty"`
+ CheckedBy string `json:"checked_by,omitempty" yaml:"checked_by,omitempty"`
+ Status RiskStatus `json:"status,omitempty" yaml:"status,omitempty"`
+ Date Date `json:"date,omitempty" yaml:"date,omitempty"`
+}
diff --git a/pkg/security/types/risk.go b/pkg/security/types/risk.go
new file mode 100644
index 00000000..c98863ed
--- /dev/null
+++ b/pkg/security/types/risk.go
@@ -0,0 +1,41 @@
+package types
+
+// Risk is a single identified risk instance: its category, status,
+// severity/likelihood/impact assessment, the model elements it is most
+// relevant to, and the technical assets a data breach could reach.
+// SyntheticId uniquely identifies the instance and keys RiskTracking.
+type Risk struct {
+ CategoryId string `yaml:"category,omitempty" json:"category,omitempty"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically
+ RiskStatus RiskStatus `yaml:"risk_status,omitempty" json:"risk_status,omitempty"` // used for better JSON marshalling, is assigned in risk evaluation phase automatically
+ Severity RiskSeverity `yaml:"severity,omitempty" json:"severity,omitempty"`
+ ExploitationLikelihood RiskExploitationLikelihood `yaml:"exploitation_likelihood,omitempty" json:"exploitation_likelihood,omitempty"`
+ ExploitationImpact RiskExploitationImpact `yaml:"exploitation_impact,omitempty" json:"exploitation_impact,omitempty"`
+ Title string `yaml:"title,omitempty" json:"title,omitempty"`
+ SyntheticId string `yaml:"synthetic_id,omitempty" json:"synthetic_id,omitempty"`
+ MostRelevantDataAssetId string `yaml:"most_relevant_data_asset,omitempty" json:"most_relevant_data_asset,omitempty"`
+ MostRelevantTechnicalAssetId string `yaml:"most_relevant_technical_asset,omitempty" json:"most_relevant_technical_asset,omitempty"`
+ MostRelevantTrustBoundaryId string `yaml:"most_relevant_trust_boundary,omitempty" json:"most_relevant_trust_boundary,omitempty"`
+ MostRelevantSharedRuntimeId string `yaml:"most_relevant_shared_runtime,omitempty" json:"most_relevant_shared_runtime,omitempty"`
+ MostRelevantCommunicationLinkId string `yaml:"most_relevant_communication_link,omitempty" json:"most_relevant_communication_link,omitempty"`
+ DataBreachProbability DataBreachProbability `yaml:"data_breach_probability,omitempty" json:"data_breach_probability,omitempty"`
+ DataBreachTechnicalAssetIDs []string `yaml:"data_breach_technical_assets,omitempty" json:"data_breach_technical_assets,omitempty"`
+ // TODO: refactor all "Id" here to "ID"?
+}
+
+// GetRiskTracking returns the tracking entry recorded in the model for
+// this risk's synthetic ID, or the zero-value RiskTracking if none exists.
+func (what Risk) GetRiskTracking(model *ParsedModel) RiskTracking { // TODO: Unify function naming regarding Get etc.
+ var result RiskTracking
+ if riskTracking, ok := model.RiskTracking[what.SyntheticId]; ok {
+ result = riskTracking
+ }
+ return result
+}
+
+// GetRiskTrackingStatusDefaultingUnchecked returns the tracked status of
+// this risk, defaulting to Unchecked when no tracking entry exists.
+func (what Risk) GetRiskTrackingStatusDefaultingUnchecked(model *ParsedModel) RiskStatus {
+ if riskTracking, ok := model.RiskTracking[what.SyntheticId]; ok {
+ return riskTracking.Status
+ }
+ return Unchecked
+}
+
+// IsRiskTracked reports whether the model has a tracking entry for this risk.
+func (what Risk) IsRiskTracked(model *ParsedModel) bool {
+ if _, ok := model.RiskTracking[what.SyntheticId]; ok {
+ return true
+ }
+ return false
+}
diff --git a/pkg/security/types/risk_exploitation_impact.go b/pkg/security/types/risk_exploitation_impact.go
new file mode 100644
index 00000000..3fb7f6c3
--- /dev/null
+++ b/pkg/security/types/risk_exploitation_impact.go
@@ -0,0 +1,112 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type RiskExploitationImpact int
+
+const (
+ LowImpact RiskExploitationImpact = iota
+ MediumImpact
+ HighImpact
+ VeryHighImpact
+)
+
+func RiskExploitationImpactValues() []TypeEnum {
+ return []TypeEnum{
+ LowImpact,
+ MediumImpact,
+ HighImpact,
+ VeryHighImpact,
+ }
+}
+
+var RiskExploitationImpactTypeDescription = [...]TypeDescription{
+ {"low", "Low"},
+ {"medium", "Medium"},
+ {"high", "High"},
+ {"very-high", "Very High"},
+}
+
+// ParseRiskExploitationImpact converts a name (e.g. "very-high") into a
+// RiskExploitationImpact. An empty (or whitespace-only) value defaults to
+// MediumImpact; otherwise the comparison is exact (case-sensitive) and
+// unknown names return the zero value (LowImpact) and an error.
+func ParseRiskExploitationImpact(value string) (riskExploitationImpact RiskExploitationImpact, err error) {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return MediumImpact, nil
+ }
+ for _, candidate := range RiskExploitationImpactValues() {
+ if candidate.String() == value {
+ return candidate.(RiskExploitationImpact), err
+ }
+ }
+ return riskExploitationImpact, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the machine-readable name of the impact level (e.g. "low").
+func (what RiskExploitationImpact) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return RiskExploitationImpactTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the impact level.
+func (what RiskExploitationImpact) Explain() string {
+ return RiskExploitationImpactTypeDescription[what].Description
+}
+
+// Title returns a display title for the impact level (e.g. "Very High").
+func (what RiskExploitationImpact) Title() string {
+ return [...]string{"Low", "Medium", "High", "Very High"}[what]
+}
+
+// Weight returns the numeric weight (1-4) of the impact level for risk
+// calculations.
+func (what RiskExploitationImpact) Weight() int {
+ return [...]int{1, 2, 3, 4}[what]
+}
+
+// MarshalJSON serializes the impact as its name string (see String).
+func (what RiskExploitationImpact) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a RiskExploitationImpact via
+// find (case-insensitive name lookup).
+func (what *RiskExploitationImpact) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the impact as its name string (see String).
+func (what RiskExploitationImpact) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a RiskExploitationImpact
+// via find (case-insensitive name lookup).
+func (what *RiskExploitationImpact) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its RiskExploitationImpact value by case-insensitive
+// comparison against RiskExploitationImpactTypeDescription. Unknown names
+// yield RiskExploitationImpact(0) and an error.
+func (what RiskExploitationImpact) find(value string) (RiskExploitationImpact, error) {
+ for index, description := range RiskExploitationImpactTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return RiskExploitationImpact(index), nil
+ }
+ }
+
+ return RiskExploitationImpact(0), fmt.Errorf("unknown risk exploitation impact value %q", value)
+}
diff --git a/pkg/security/types/risk_exploitation_impact_test.go b/pkg/security/types/risk_exploitation_impact_test.go
new file mode 100644
index 00000000..aa4a03cb
--- /dev/null
+++ b/pkg/security/types/risk_exploitation_impact_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseRiskExploitationImpactTest struct {
+ input string
+ expected RiskExploitationImpact
+ expectedError error
+}
+
+func TestParseRiskExploitationImpact(t *testing.T) {
+ testCases := map[string]ParseRiskExploitationImpactTest{
+ "low": {
+ input: "low",
+ expected: LowImpact,
+ },
+ "medium": {
+ input: "medium",
+ expected: MediumImpact,
+ },
+ "high": {
+ input: "high",
+ expected: HighImpact,
+ },
+ "very-high": {
+ input: "very-high",
+ expected: VeryHighImpact,
+ },
+ "default": {
+ input: "",
+ expected: MediumImpact,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseRiskExploitationImpact(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risk_exploitation_likelihood.go b/pkg/security/types/risk_exploitation_likelihood.go
new file mode 100644
index 00000000..d9db35b9
--- /dev/null
+++ b/pkg/security/types/risk_exploitation_likelihood.go
@@ -0,0 +1,112 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type RiskExploitationLikelihood int
+
+const (
+ Unlikely RiskExploitationLikelihood = iota
+ Likely
+ VeryLikely
+ Frequent
+)
+
+func RiskExploitationLikelihoodValues() []TypeEnum {
+ return []TypeEnum{
+ Unlikely,
+ Likely,
+ VeryLikely,
+ Frequent,
+ }
+}
+
+var RiskExploitationLikelihoodTypeDescription = [...]TypeDescription{
+ {"unlikely", "Unlikely"},
+ {"likely", "Likely"},
+ {"very-likely", "Very-Likely"},
+ {"frequent", "Frequent"},
+}
+
+// ParseRiskExploitationLikelihood converts a name (e.g. "very-likely")
+// into a RiskExploitationLikelihood. An empty (or whitespace-only) value
+// defaults to Likely; otherwise the comparison is exact (case-sensitive)
+// and unknown names return the zero value (Unlikely) and an error.
+func ParseRiskExploitationLikelihood(value string) (riskExploitationLikelihood RiskExploitationLikelihood, err error) {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return Likely, nil
+ }
+ for _, candidate := range RiskExploitationLikelihoodValues() {
+ if candidate.String() == value {
+ return candidate.(RiskExploitationLikelihood), err
+ }
+ }
+ return riskExploitationLikelihood, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the machine-readable name of the likelihood level
+// (e.g. "unlikely").
+func (what RiskExploitationLikelihood) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return RiskExploitationLikelihoodTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the likelihood level.
+func (what RiskExploitationLikelihood) Explain() string {
+ return RiskExploitationLikelihoodTypeDescription[what].Description
+}
+
+// Title returns a display title for the likelihood level (e.g. "Very Likely").
+func (what RiskExploitationLikelihood) Title() string {
+ return [...]string{"Unlikely", "Likely", "Very Likely", "Frequent"}[what]
+}
+
+// Weight returns the numeric weight (1-4) of the likelihood level for risk
+// calculations.
+func (what RiskExploitationLikelihood) Weight() int {
+ return [...]int{1, 2, 3, 4}[what]
+}
+
+// MarshalJSON serializes the likelihood as its name string (see String).
+func (what RiskExploitationLikelihood) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a RiskExploitationLikelihood via
+// find (case-insensitive name lookup).
+func (what *RiskExploitationLikelihood) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the likelihood as its name string (see String).
+func (what RiskExploitationLikelihood) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a RiskExploitationLikelihood
+// via find (case-insensitive name lookup).
+func (what *RiskExploitationLikelihood) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its RiskExploitationLikelihood value by
+// case-insensitive comparison against
+// RiskExploitationLikelihoodTypeDescription. Unknown names yield
+// RiskExploitationLikelihood(0) and an error.
+// NOTE(review): the error message says "exploration" — likely a typo for
+// "exploitation"; left untouched here since it is a runtime string.
+func (what RiskExploitationLikelihood) find(value string) (RiskExploitationLikelihood, error) {
+ for index, description := range RiskExploitationLikelihoodTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return RiskExploitationLikelihood(index), nil
+ }
+ }
+
+ return RiskExploitationLikelihood(0), fmt.Errorf("unknown risk exploration likelihood value %q", value)
+}
diff --git a/pkg/security/types/risk_exploitation_likelihood_test.go b/pkg/security/types/risk_exploitation_likelihood_test.go
new file mode 100644
index 00000000..0c5f060e
--- /dev/null
+++ b/pkg/security/types/risk_exploitation_likelihood_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseRiskExploitationLikelihoodTest struct {
+ input string
+ expected RiskExploitationLikelihood
+ expectedError error
+}
+
+func TestParseRiskExploitationLikelihood(t *testing.T) {
+ testCases := map[string]ParseRiskExploitationLikelihoodTest{
+ "unlikely": {
+ input: "unlikely",
+ expected: Unlikely,
+ },
+ "likely": {
+ input: "likely",
+ expected: Likely,
+ },
+ "very-likely": {
+ input: "very-likely",
+ expected: VeryLikely,
+ },
+ "frequent": {
+ input: "frequent",
+ expected: Frequent,
+ },
+ "default": {
+ input: "",
+ expected: Likely,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseRiskExploitationLikelihood(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risk_function.go b/pkg/security/types/risk_function.go
new file mode 100644
index 00000000..8a843591
--- /dev/null
+++ b/pkg/security/types/risk_function.go
@@ -0,0 +1,105 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type RiskFunction int
+
+const (
+ BusinessSide RiskFunction = iota
+ Architecture
+ Development
+ Operations
+)
+
+func RiskFunctionValues() []TypeEnum {
+ return []TypeEnum{
+ BusinessSide,
+ Architecture,
+ Development,
+ Operations,
+ }
+}
+
+var RiskFunctionTypeDescription = [...]TypeDescription{
+ {"business-side", "Business"},
+ {"architecture", "Architecture"},
+ {"development", "Development"},
+ {"operations", "Operations"},
+}
+
+// ParseRiskFunction converts a name (e.g. "architecture") into a
+// RiskFunction. The comparison is exact (case-sensitive) after trimming
+// whitespace; unknown (including empty) names return the zero value
+// (BusinessSide) and an error — unlike some sibling parsers, there is no
+// empty-string default.
+func ParseRiskFunction(value string) (riskFunction RiskFunction, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range RiskFunctionValues() {
+ if candidate.String() == value {
+ return candidate.(RiskFunction), err
+ }
+ }
+ return riskFunction, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the machine-readable name of the function
+// (e.g. "business-side").
+func (what RiskFunction) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return RiskFunctionTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the function.
+func (what RiskFunction) Explain() string {
+ return RiskFunctionTypeDescription[what].Description
+}
+
+// Title returns a display title for the function (e.g. "Business Side").
+func (what RiskFunction) Title() string {
+ return [...]string{"Business Side", "Architecture", "Development", "Operations"}[what]
+}
+
+// MarshalJSON serializes the function as its name string (see String).
+func (what RiskFunction) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a RiskFunction via find
+// (case-insensitive name lookup).
+func (what *RiskFunction) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the function as its name string (see String).
+func (what RiskFunction) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a RiskFunction via find
+// (case-insensitive name lookup).
+func (what *RiskFunction) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its RiskFunction value by case-insensitive
+// comparison against RiskFunctionTypeDescription. Unknown names yield
+// RiskFunction(0) and an error.
+func (what RiskFunction) find(value string) (RiskFunction, error) {
+ for index, description := range RiskFunctionTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return RiskFunction(index), nil
+ }
+ }
+
+ return RiskFunction(0), fmt.Errorf("unknown risk function value %q", value)
+}
diff --git a/pkg/security/types/risk_function_test.go b/pkg/security/types/risk_function_test.go
new file mode 100644
index 00000000..68703188
--- /dev/null
+++ b/pkg/security/types/risk_function_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseRiskFunctionTest struct {
+ input string
+ expected RiskFunction
+ expectedError error
+}
+
+func TestParseRiskFunction(t *testing.T) {
+ testCases := map[string]ParseRiskFunctionTest{
+ "business-side": {
+ input: "business-side",
+ expected: BusinessSide,
+ },
+ "architecture": {
+ input: "architecture",
+ expected: Architecture,
+ },
+ "development": {
+ input: "development",
+ expected: Development,
+ },
+ "operations": {
+ input: "operations",
+ expected: Operations,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseRiskFunction(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risk_severity.go b/pkg/security/types/risk_severity.go
new file mode 100644
index 00000000..41a665b8
--- /dev/null
+++ b/pkg/security/types/risk_severity.go
@@ -0,0 +1,111 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type RiskSeverity int
+
+const (
+ LowSeverity RiskSeverity = iota
+ MediumSeverity
+ ElevatedSeverity
+ HighSeverity
+ CriticalSeverity
+)
+
+func RiskSeverityValues() []TypeEnum {
+ return []TypeEnum{
+ LowSeverity,
+ MediumSeverity,
+ ElevatedSeverity,
+ HighSeverity,
+ CriticalSeverity,
+ }
+}
+
+var RiskSeverityTypeDescription = [...]TypeDescription{
+ {"low", "Low"},
+ {"medium", "Medium"},
+ {"elevated", "Elevated"},
+ {"high", "High"},
+ {"critical", "Critical"},
+}
+
+// ParseRiskSeverity converts a name (e.g. "elevated") into a RiskSeverity.
+// An empty (or whitespace-only) value defaults to MediumSeverity;
+// otherwise the comparison is exact (case-sensitive) and unknown names
+// return the zero value (LowSeverity) and an error.
+func ParseRiskSeverity(value string) (riskSeverity RiskSeverity, err error) {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return MediumSeverity, nil
+ }
+ for _, candidate := range RiskSeverityValues() {
+ if candidate.String() == value {
+ return candidate.(RiskSeverity), err
+ }
+ }
+ return riskSeverity, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the machine-readable name of the severity (e.g. "low").
+func (what RiskSeverity) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return RiskSeverityTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the severity.
+func (what RiskSeverity) Explain() string {
+ return RiskSeverityTypeDescription[what].Description
+}
+
+// Title returns a display title for the severity (e.g. "Elevated").
+func (what RiskSeverity) Title() string {
+ return [...]string{"Low", "Medium", "Elevated", "High", "Critical"}[what]
+}
+
+// MarshalJSON serializes the severity as its name string (see String).
+func (what RiskSeverity) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a RiskSeverity via find
+// (case-insensitive name lookup).
+func (what *RiskSeverity) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the severity as its name string (see String).
+func (what RiskSeverity) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a RiskSeverity via find
+// (case-insensitive name lookup).
+func (what *RiskSeverity) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its RiskSeverity value by case-insensitive
+// comparison against RiskSeverityTypeDescription. Unknown names yield
+// RiskSeverity(0) and an error.
+func (what RiskSeverity) find(value string) (RiskSeverity, error) {
+ for index, description := range RiskSeverityTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return RiskSeverity(index), nil
+ }
+ }
+
+ return RiskSeverity(0), fmt.Errorf("unknown risk severity value %q", value)
+}
diff --git a/pkg/security/types/risk_severity_test.go b/pkg/security/types/risk_severity_test.go
new file mode 100644
index 00000000..4a987eb8
--- /dev/null
+++ b/pkg/security/types/risk_severity_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type ParseRiskSeverityTest struct {
+ input string
+ expected RiskSeverity
+ expectedError error
+}
+
+func TestParseRiskSeverity(t *testing.T) {
+ testCases := map[string]ParseRiskSeverityTest{
+ "low": {
+ input: "low",
+ expected: LowSeverity,
+ },
+ "medium": {
+ input: "medium",
+ expected: MediumSeverity,
+ },
+ "elevated": {
+ input: "elevated",
+ expected: ElevatedSeverity,
+ },
+ "high": {
+ input: "high",
+ expected: HighSeverity,
+ },
+ "critical": {
+ input: "critical",
+ expected: CriticalSeverity,
+ },
+ "default": {
+ input: "",
+ expected: MediumSeverity,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseRiskSeverity(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risk_status.go b/pkg/security/types/risk_status.go
new file mode 100644
index 00000000..1f024219
--- /dev/null
+++ b/pkg/security/types/risk_status.go
@@ -0,0 +1,115 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+type RiskStatus int
+
+const (
+ Unchecked RiskStatus = iota
+ InDiscussion
+ Accepted
+ InProgress
+ Mitigated
+ FalsePositive
+)
+
+func RiskStatusValues() []TypeEnum {
+ return []TypeEnum{
+ Unchecked,
+ InDiscussion,
+ Accepted,
+ InProgress,
+ Mitigated,
+ FalsePositive,
+ }
+}
+
+var RiskStatusTypeDescription = [...]TypeDescription{
+ {"unchecked", "Risk has not yet been reviewed"},
+ {"in-discussion", "Risk is currently being discussed (during review)"},
+ {"accepted", "Risk has been accepted (as possibly a corporate risk acceptance process defines)"},
+ {"in-progress", "Risk mitigation is currently in progress"},
+ {"mitigated", "Risk has been mitigated"},
+ {"false-positive", "Risk is a false positive (i.e. no risk at all or not applicable)"},
+}
+
+// ParseRiskStatus converts a name (e.g. "in-progress") into a RiskStatus.
+// The comparison is exact (case-sensitive) after trimming whitespace;
+// unknown (including empty) names return the zero value (Unchecked) and
+// an error — there is no empty-string default here.
+func ParseRiskStatus(value string) (riskStatus RiskStatus, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range RiskStatusValues() {
+ if candidate.String() == value {
+ return candidate.(RiskStatus), err
+ }
+ }
+ return riskStatus, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the machine-readable name of the status (e.g. "unchecked").
+func (what RiskStatus) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return RiskStatusTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the status.
+func (what RiskStatus) Explain() string {
+ return RiskStatusTypeDescription[what].Description
+}
+
+// Title returns a display title for the status (e.g. "in Discussion").
+func (what RiskStatus) Title() string {
+ return [...]string{"Unchecked", "in Discussion", "Accepted", "in Progress", "Mitigated", "False Positive"}[what]
+}
+
+// IsStillAtRisk reports whether the status still represents an open risk,
+// i.e. anything that is neither Mitigated nor FalsePositive.
+func (what RiskStatus) IsStillAtRisk() bool {
+ return what == Unchecked || what == InDiscussion || what == Accepted || what == InProgress
+}
+
+// MarshalJSON serializes the status as its name string (see String).
+func (what RiskStatus) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a RiskStatus via find
+// (case-insensitive name lookup).
+func (what *RiskStatus) UnmarshalJSON(data []byte) error {
+ var text string
+ unmarshalError := json.Unmarshal(data, &text)
+ if unmarshalError != nil {
+ return unmarshalError
+ }
+
+ value, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// MarshalYAML serializes the status as its name string (see String).
+func (what RiskStatus) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a RiskStatus via find
+// (case-insensitive name lookup).
+func (what *RiskStatus) UnmarshalYAML(node *yaml.Node) error {
+ value, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = value
+ return nil
+}
+
+// find maps a name to its RiskStatus value by case-insensitive comparison
+// against RiskStatusTypeDescription. Unknown names yield RiskStatus(0)
+// and an error.
+func (what RiskStatus) find(value string) (RiskStatus, error) {
+ for index, description := range RiskStatusTypeDescription {
+ if strings.EqualFold(value, description.Name) {
+ return RiskStatus(index), nil
+ }
+ }
+
+ return RiskStatus(0), fmt.Errorf("unknown risk status value %q", value)
+}
diff --git a/pkg/security/types/risk_status_test.go b/pkg/security/types/risk_status_test.go
new file mode 100644
index 00000000..989ef3f3
--- /dev/null
+++ b/pkg/security/types/risk_status_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// ParseRiskStatusTest describes one table-driven case for TestParseRiskStatus:
// the raw input string, the status it is expected to parse to, and the error
// expected for unparsable input (nil on the happy path).
type ParseRiskStatusTest struct {
	input         string
	expected      RiskStatus
	expectedError error
}
+
+func TestParseRiskStatus(t *testing.T) {
+ testCases := map[string]ParseRiskStatusTest{
+ "unchecked": {
+ input: "unchecked",
+ expected: Unchecked,
+ },
+ "in-discussion": {
+ input: "in-discussion",
+ expected: InDiscussion,
+ },
+ "accepted": {
+ input: "accepted",
+ expected: Accepted,
+ },
+ "in-progress": {
+ input: "in-progress",
+ expected: InProgress,
+ },
+ "mitigated": {
+ input: "mitigated",
+ expected: Mitigated,
+ },
+ "false-positive": {
+ input: "false-positive",
+ expected: FalsePositive,
+ },
+ "unknown": {
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ },
+ }
+
+ for name, testCase := range testCases {
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseRiskStatus(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/risks.go b/pkg/security/types/risks.go
new file mode 100644
index 00000000..72bb3569
--- /dev/null
+++ b/pkg/security/types/risks.go
@@ -0,0 +1,828 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "sort"
+)
+
+func GetRiskCategory(parsedModel *ParsedModel, categoryID string) *RiskCategory {
+ if len(parsedModel.IndividualRiskCategories) > 0 {
+ custom, customOk := parsedModel.IndividualRiskCategories[categoryID]
+ if customOk {
+ return &custom
+ }
+ }
+
+ if len(parsedModel.BuiltInRiskCategories) > 0 {
+ builtIn, builtInOk := parsedModel.BuiltInRiskCategories[categoryID]
+ if builtInOk {
+ return &builtIn
+ }
+ }
+
+ return nil
+}
+
+func GetRiskCategories(parsedModel *ParsedModel, categoryIDs []string) []RiskCategory {
+ categoryMap := make(map[string]RiskCategory)
+ for _, categoryId := range categoryIDs {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category != nil {
+ categoryMap[categoryId] = *category
+ }
+ }
+
+ categories := make([]RiskCategory, 0)
+ for categoryId := range categoryMap {
+ categories = append(categories, categoryMap[categoryId])
+ }
+
+ return categories
+}
+
+func AllRisks(parsedModel *ParsedModel) []Risk {
+ result := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ result = append(result, risk)
+ }
+ }
+ return result
+}
+
+func ReduceToOnlyStillAtRisk(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func HighestExploitationLikelihood(risks []Risk) RiskExploitationLikelihood {
+ result := Unlikely
+ for _, risk := range risks {
+ if risk.ExploitationLikelihood > result {
+ result = risk.ExploitationLikelihood
+ }
+ }
+ return result
+}
+
+func HighestExploitationImpact(risks []Risk) RiskExploitationImpact {
+ result := LowImpact
+ for _, risk := range risks {
+ if risk.ExploitationImpact > result {
+ result = risk.ExploitationImpact
+ }
+ }
+ return result
+}
+
+func HighestSeverityStillAtRisk(model *ParsedModel, risks []Risk) RiskSeverity {
+ result := LowSeverity
+ for _, risk := range risks {
+ if risk.Severity > result && risk.GetRiskTrackingStatusDefaultingUnchecked(model).IsStillAtRisk() {
+ result = risk.Severity
+ }
+ }
+ return result
+}
+
// ByRiskCategoryTitleSort implements sort.Interface to order risk categories
// alphabetically by title.
type ByRiskCategoryTitleSort []RiskCategory

func (what ByRiskCategoryTitleSort) Len() int { return len(what) }
func (what ByRiskCategoryTitleSort) Swap(i, j int) {
	what[i], what[j] = what[j], what[i]
}
func (what ByRiskCategoryTitleSort) Less(i, j int) bool {
	return what[i].Title < what[j].Title
}
+
// SortByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk sorts the
// given categories in place: primarily by the highest still-at-risk severity
// among each category's generated risks (descending); on ties, categories
// that still contain open risks sort before empty ones, and equal categories
// fall back to alphabetical order by title.
func SortByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk(parsedModel *ParsedModel, riskCategories []RiskCategory) {
	sort.Slice(riskCategories, func(i, j int) bool {
		risksLeft := ReduceToOnlyStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[riskCategories[i].Id])
		risksRight := ReduceToOnlyStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[riskCategories[j].Id])
		highestLeft := HighestSeverityStillAtRisk(parsedModel, risksLeft)
		highestRight := HighestSeverityStillAtRisk(parsedModel, risksRight)
		if highestLeft == highestRight {
			// tie-break: a category with no open risks sorts after one that has some
			if len(risksLeft) == 0 && len(risksRight) > 0 {
				return false
			}
			if len(risksLeft) > 0 && len(risksRight) == 0 {
				return true
			}
			return riskCategories[i].Title < riskCategories[j].Title
		}
		return highestLeft > highestRight
	})
}
+
// RiskStatistics aggregates risk counts keyed first by severity name, then by
// tracking-status name (see OverallRiskStatistics).
type RiskStatistics struct {
	// TODO add also some more like before / after (i.e. with mitigation applied)
	Risks map[string]map[string]int `yaml:"risks" json:"risks"`
}
+
+func SortByRiskSeverity(risks []Risk, parsedModel *ParsedModel) {
+ sort.Slice(risks, func(i, j int) bool {
+ if risks[i].Severity == risks[j].Severity {
+ trackingStatusLeft := risks[i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
+ trackingStatusRight := risks[j].GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
+ if trackingStatusLeft == trackingStatusRight {
+ impactLeft := risks[i].ExploitationImpact
+ impactRight := risks[j].ExploitationImpact
+ if impactLeft == impactRight {
+ likelihoodLeft := risks[i].ExploitationLikelihood
+ likelihoodRight := risks[j].ExploitationLikelihood
+ if likelihoodLeft == likelihoodRight {
+ return risks[i].Title < risks[j].Title
+ } else {
+ return likelihoodLeft > likelihoodRight
+ }
+ } else {
+ return impactLeft > impactRight
+ }
+ } else {
+ return trackingStatusLeft < trackingStatusRight
+ }
+ }
+ return risks[i].Severity > risks[j].Severity
+
+ })
+}
+
+func SortByDataBreachProbability(risks []Risk, parsedModel *ParsedModel) {
+ sort.Slice(risks, func(i, j int) bool {
+
+ if risks[i].DataBreachProbability == risks[j].DataBreachProbability {
+ trackingStatusLeft := risks[i].GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
+ trackingStatusRight := risks[j].GetRiskTrackingStatusDefaultingUnchecked(parsedModel)
+ if trackingStatusLeft == trackingStatusRight {
+ return risks[i].Title < risks[j].Title
+ } else {
+ return trackingStatusLeft < trackingStatusRight
+ }
+ }
+ return risks[i].DataBreachProbability > risks[j].DataBreachProbability
+ })
+}
+
+// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way:
+
+func SortedRiskCategories(parsedModel *ParsedModel) []RiskCategory {
+ categoryMap := make(map[string]RiskCategory)
+ for categoryId := range parsedModel.GeneratedRisksByCategory {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category != nil {
+ categoryMap[categoryId] = *category
+ }
+ }
+
+ categories := make([]RiskCategory, 0)
+ for categoryId := range categoryMap {
+ categories = append(categories, categoryMap[categoryId])
+ }
+
+ SortByRiskCategoryHighestContainingRiskSeveritySortStillAtRisk(parsedModel, categories)
+ return categories
+}
+
// SortedRisksOfCategory returns the generated risks of the given category,
// sorted by severity (see SortByRiskSeverity for the full ordering).
// NOTE(review): sorts the slice stored in GeneratedRisksByCategory in place,
// i.e. it mutates the parsed model's ordering - presumably intended; confirm.
func SortedRisksOfCategory(parsedModel *ParsedModel, category RiskCategory) []Risk {
	risks := parsedModel.GeneratedRisksByCategory[category.Id]
	SortByRiskSeverity(risks, parsedModel)
	return risks
}
+
+func CountRisks(risksByCategory map[string][]Risk) int {
+ result := 0
+ for _, risks := range risksByCategory {
+ result += len(risks)
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDESpoofing(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category != nil {
+ if category.STRIDE == Spoofing {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDETampering(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category != nil {
+ if category.STRIDE == Tampering {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDERepudiation(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == Repudiation {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDEInformationDisclosure(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == InformationDisclosure {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDEDenialOfService(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == DenialOfService {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlySTRIDEElevationOfPrivilege(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.STRIDE == ElevationOfPrivilege {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlyBusinessSide(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == BusinessSide {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlyArchitecture(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Architecture {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlyDevelopment(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Development {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func RisksOfOnlyOperation(parsedModel *ParsedModel, risksByCategory map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Operations {
+ result[categoryId] = append(result[categoryId], risk)
+ }
+ }
+ }
+ return result
+}
+
+func CategoriesOfOnlyRisksStillAtRisk(parsedModel *ParsedModel, risksByCategory map[string][]Risk) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ categories[categoryId] = struct{}{}
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func CategoriesOfOnlyCriticalRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ if risk.Severity == CriticalSeverity {
+ categories[categoryId] = struct{}{}
+ }
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func CategoriesOfOnlyHighRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId])
+ if !initialRisks {
+ highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId])
+ }
+ if risk.Severity == HighSeverity && highest < CriticalSeverity {
+ categories[categoryId] = struct{}{}
+ }
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func CategoriesOfOnlyElevatedRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId])
+ if !initialRisks {
+ highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId])
+ }
+ if risk.Severity == ElevatedSeverity && highest < HighSeverity {
+ categories[categoryId] = struct{}{}
+ }
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func CategoriesOfOnlyMediumRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId])
+ if !initialRisks {
+ highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId])
+ }
+ if risk.Severity == MediumSeverity && highest < ElevatedSeverity {
+ categories[categoryId] = struct{}{}
+ }
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func CategoriesOfOnlyLowRisks(parsedModel *ParsedModel, risksByCategory map[string][]Risk, initialRisks bool) []string {
+ categories := make(map[string]struct{}) // Go's trick of unique elements is a map
+ for categoryId, risks := range risksByCategory {
+ for _, risk := range risks {
+ if !initialRisks && !risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ continue
+ }
+ highest := HighestSeverity(parsedModel.GeneratedRisksByCategory[categoryId])
+ if !initialRisks {
+ highest = HighestSeverityStillAtRisk(parsedModel, parsedModel.GeneratedRisksByCategory[categoryId])
+ }
+ if risk.Severity == LowSeverity && highest < MediumSeverity {
+ categories[categoryId] = struct{}{}
+ }
+ }
+ }
+ // return as slice (of now unique values)
+ return keysAsSlice(categories)
+}
+
+func HighestSeverity(risks []Risk) RiskSeverity {
+ result := LowSeverity
+ for _, risk := range risks {
+ if risk.Severity > result {
+ result = risk.Severity
+ }
+ }
+ return result
+}
+
// keysAsSlice converts a string set (map used for uniqueness) into a plain
// slice of its keys; the order of the result is unspecified.
func keysAsSlice(categories map[string]struct{}) []string {
	result := make([]string, 0, len(categories))
	for key := range categories {
		result = append(result, key)
	}
	return result
}
+
+func FilteredByOnlyBusinessSide(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for categoryId, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == BusinessSide {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyArchitecture(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for categoryId, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Architecture {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyDevelopment(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for categoryId, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Development {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyOperation(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for categoryId, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.Function == Operations {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyCriticalRisks(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.Severity == CriticalSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyHighRisks(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.Severity == HighSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyElevatedRisks(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.Severity == ElevatedSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyMediumRisks(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.Severity == MediumSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByOnlyLowRisks(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.Severity == LowSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilterByModelFailures(parsedModel *ParsedModel, risksByCat map[string][]Risk) map[string][]Risk {
+ result := make(map[string][]Risk)
+ for categoryId, risks := range risksByCat {
+ category := GetRiskCategory(parsedModel, categoryId)
+ if category.ModelFailurePossibleReason {
+ result[categoryId] = risks
+ }
+ }
+
+ return result
+}
+
+func FlattenRiskSlice(risksByCat map[string][]Risk) []Risk {
+ result := make([]Risk, 0)
+ for _, risks := range risksByCat {
+ result = append(result, risks...)
+ }
+ return result
+}
+
+func TotalRiskCount(parsedModel *ParsedModel) int {
+ count := 0
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ count += len(risks)
+ }
+ return count
+}
+
+func FilteredByRiskTrackingUnchecked(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Unchecked {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByRiskTrackingInDiscussion(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InDiscussion {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByRiskTrackingAccepted(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Accepted {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByRiskTrackingInProgress(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InProgress {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByRiskTrackingMitigated(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Mitigated {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByRiskTrackingFalsePositive(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == FalsePositive {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyHighRisk(risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.Severity == HighSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyMediumRisk(risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.Severity == MediumSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyLowRisk(risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.Severity == LowSeverity {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingUnchecked(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Unchecked {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingInDiscussion(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InDiscussion {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingAccepted(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Accepted {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingInProgress(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == InProgress {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingMitigated(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == Mitigated {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func ReduceToOnlyRiskTrackingFalsePositive(parsedModel *ParsedModel, risks []Risk) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel) == FalsePositive {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ return filteredRisks
+}
+
+func FilteredByStillAtRisk(parsedModel *ParsedModel) []Risk {
+ filteredRisks := make([]Risk, 0)
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ if risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).IsStillAtRisk() {
+ filteredRisks = append(filteredRisks, risk)
+ }
+ }
+ }
+ return filteredRisks
+}
+
+func OverallRiskStatistics(parsedModel *ParsedModel) RiskStatistics {
+ result := RiskStatistics{}
+ result.Risks = make(map[string]map[string]int)
+ result.Risks[CriticalSeverity.String()] = make(map[string]int)
+ result.Risks[CriticalSeverity.String()][Unchecked.String()] = 0
+ result.Risks[CriticalSeverity.String()][InDiscussion.String()] = 0
+ result.Risks[CriticalSeverity.String()][Accepted.String()] = 0
+ result.Risks[CriticalSeverity.String()][InProgress.String()] = 0
+ result.Risks[CriticalSeverity.String()][Mitigated.String()] = 0
+ result.Risks[CriticalSeverity.String()][FalsePositive.String()] = 0
+ result.Risks[HighSeverity.String()] = make(map[string]int)
+ result.Risks[HighSeverity.String()][Unchecked.String()] = 0
+ result.Risks[HighSeverity.String()][InDiscussion.String()] = 0
+ result.Risks[HighSeverity.String()][Accepted.String()] = 0
+ result.Risks[HighSeverity.String()][InProgress.String()] = 0
+ result.Risks[HighSeverity.String()][Mitigated.String()] = 0
+ result.Risks[HighSeverity.String()][FalsePositive.String()] = 0
+ result.Risks[ElevatedSeverity.String()] = make(map[string]int)
+ result.Risks[ElevatedSeverity.String()][Unchecked.String()] = 0
+ result.Risks[ElevatedSeverity.String()][InDiscussion.String()] = 0
+ result.Risks[ElevatedSeverity.String()][Accepted.String()] = 0
+ result.Risks[ElevatedSeverity.String()][InProgress.String()] = 0
+ result.Risks[ElevatedSeverity.String()][Mitigated.String()] = 0
+ result.Risks[ElevatedSeverity.String()][FalsePositive.String()] = 0
+ result.Risks[MediumSeverity.String()] = make(map[string]int)
+ result.Risks[MediumSeverity.String()][Unchecked.String()] = 0
+ result.Risks[MediumSeverity.String()][InDiscussion.String()] = 0
+ result.Risks[MediumSeverity.String()][Accepted.String()] = 0
+ result.Risks[MediumSeverity.String()][InProgress.String()] = 0
+ result.Risks[MediumSeverity.String()][Mitigated.String()] = 0
+ result.Risks[MediumSeverity.String()][FalsePositive.String()] = 0
+ result.Risks[LowSeverity.String()] = make(map[string]int)
+ result.Risks[LowSeverity.String()][Unchecked.String()] = 0
+ result.Risks[LowSeverity.String()][InDiscussion.String()] = 0
+ result.Risks[LowSeverity.String()][Accepted.String()] = 0
+ result.Risks[LowSeverity.String()][InProgress.String()] = 0
+ result.Risks[LowSeverity.String()][Mitigated.String()] = 0
+ result.Risks[LowSeverity.String()][FalsePositive.String()] = 0
+ for _, risks := range parsedModel.GeneratedRisksByCategory {
+ for _, risk := range risks {
+ result.Risks[risk.Severity.String()][risk.GetRiskTrackingStatusDefaultingUnchecked(parsedModel).String()]++
+ }
+ }
+ return result
+}
diff --git a/pkg/security/types/shared_runtime.go b/pkg/security/types/shared_runtime.go
new file mode 100644
index 00000000..c23873a6
--- /dev/null
+++ b/pkg/security/types/shared_runtime.go
@@ -0,0 +1,88 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "sort"
+)
+
// SharedRuntime models an execution environment on which multiple technical
// assets run together (so compromise of the runtime can affect all of them).
type SharedRuntime struct {
	Id string `json:"id,omitempty" yaml:"id,omitempty"`
	Title string `json:"title,omitempty" yaml:"title,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
	TechnicalAssetsRunning []string `json:"technical_assets_running,omitempty" yaml:"technical_assets_running,omitempty"` // ids into ParsedModel.TechnicalAssets
}
+
// IsTaggedWithAny reports whether this shared runtime carries any of the given
// tags (matching delegated to containsCaseInsensitiveAny, which by its name
// compares case-insensitively).
func (what SharedRuntime) IsTaggedWithAny(tags ...string) bool {
	return containsCaseInsensitiveAny(what.Tags, tags...)
}

// IsTaggedWithBaseTag reports whether this shared runtime carries the given
// base tag; exact base-tag semantics are defined by the package-level
// IsTaggedWithBaseTag helper.
func (what SharedRuntime) IsTaggedWithBaseTag(baseTag string) bool {
	return IsTaggedWithBaseTag(what.Tags, baseTag)
}
+
+func (what SharedRuntime) HighestConfidentiality(model *ParsedModel) Confidentiality {
+ highest := Public
+ for _, id := range what.TechnicalAssetsRunning {
+ techAsset := model.TechnicalAssets[id]
+ if techAsset.HighestConfidentiality(model) > highest {
+ highest = techAsset.HighestConfidentiality(model)
+ }
+ }
+ return highest
+}
+
+func (what SharedRuntime) HighestIntegrity(model *ParsedModel) Criticality {
+ highest := Archive
+ for _, id := range what.TechnicalAssetsRunning {
+ techAsset := model.TechnicalAssets[id]
+ if techAsset.HighestIntegrity(model) > highest {
+ highest = techAsset.HighestIntegrity(model)
+ }
+ }
+ return highest
+}
+
+func (what SharedRuntime) HighestAvailability(model *ParsedModel) Criticality {
+ highest := Archive
+ for _, id := range what.TechnicalAssetsRunning {
+ techAsset := model.TechnicalAssets[id]
+ if techAsset.HighestAvailability(model) > highest {
+ highest = techAsset.HighestAvailability(model)
+ }
+ }
+ return highest
+}
+
// TechnicalAssetWithHighestRAA returns the running technical asset with the
// highest RAA value.
// NOTE(review): panics (index out of range) when TechnicalAssetsRunning is
// empty - callers apparently must guarantee at least one running asset; confirm.
func (what SharedRuntime) TechnicalAssetWithHighestRAA(model *ParsedModel) TechnicalAsset {
	result := model.TechnicalAssets[what.TechnicalAssetsRunning[0]]
	for _, asset := range what.TechnicalAssetsRunning {
		candidate := model.TechnicalAssets[asset]
		if candidate.RAA > result.RAA {
			result = candidate
		}
	}
	return result
}
+
+// as in Go ranging over map is random order, range over them in sorted (hence reproducible) way:
+
+func SortedKeysOfSharedRuntime(model *ParsedModel) []string {
+ keys := make([]string, 0)
+ for k := range model.SharedRuntimes {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
// BySharedRuntimeTitleSort implements sort.Interface to order shared runtimes
// alphabetically by title.
type BySharedRuntimeTitleSort []SharedRuntime

func (what BySharedRuntimeTitleSort) Len() int { return len(what) }
func (what BySharedRuntimeTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
func (what BySharedRuntimeTitleSort) Less(i, j int) bool {
	return what[i].Title < what[j].Title
}
diff --git a/pkg/security/types/stride.go b/pkg/security/types/stride.go
new file mode 100644
index 00000000..c8015e54
--- /dev/null
+++ b/pkg/security/types/stride.go
@@ -0,0 +1,111 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// STRIDE is the threat categorization: Spoofing, Tampering, Repudiation,
+// Information disclosure, Denial of service, Elevation of privilege.
+type STRIDE int
+
+const (
+	Spoofing STRIDE = iota
+	Tampering
+	Repudiation
+	InformationDisclosure
+	DenialOfService
+	ElevationOfPrivilege
+)
+
+// STRIDEValues returns all STRIDE values in declaration order, boxed as
+// TypeEnum for generic enum handling (parsing, listing).
+func STRIDEValues() []TypeEnum {
+	return []TypeEnum{
+		Spoofing,
+		Tampering,
+		Repudiation,
+		InformationDisclosure,
+		DenialOfService,
+		ElevationOfPrivilege,
+	}
+}
+
+// StrideTypeDescription maps each STRIDE value (by index) to its serialized
+// name and human-readable description; order must match the const block above.
+var StrideTypeDescription = [...]TypeDescription{
+	{"spoofing", "Spoofing - Authenticity"},
+	{"tampering", "Tampering - Integrity"},
+	{"repudiation", "Repudiation - Non-repudiability"},
+	{"information-disclosure", "Information disclosure - Confidentiality"},
+	{"denial-of-service", "Denial of service - Availability"},
+	{"elevation-of-privilege", "Elevation of privilege - Authorization"},
+}
+
+// ParseSTRIDE maps a serialized name (e.g. "tampering", surrounding
+// whitespace ignored) onto its STRIDE value; unknown names yield an error.
+func ParseSTRIDE(value string) (stride STRIDE, err error) {
+	trimmed := strings.TrimSpace(value)
+	for index, description := range StrideTypeDescription {
+		if description.Name == trimmed {
+			return STRIDE(index), nil
+		}
+	}
+	return stride, errors.New("Unable to parse into type: " + trimmed)
+}
+
+// String returns the serialized name of the STRIDE value (e.g. "spoofing").
+func (what STRIDE) String() string {
+	// NOTE: maintain list also in schema.json for validation in IDEs
+	return StrideTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the STRIDE value.
+func (what STRIDE) Explain() string {
+	return StrideTypeDescription[what].Description
+}
+
+// Title returns the display title of the STRIDE value; the literal list must
+// stay in sync with the const block order.
+func (what STRIDE) Title() string {
+	return [...]string{"Spoofing", "Tampering", "Repudiation", "Information Disclosure", "Denial of Service", "Elevation of Privilege"}[what]
+}
+
+// MarshalJSON serializes the STRIDE value as its name string.
+func (what STRIDE) MarshalJSON() ([]byte, error) {
+	return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a STRIDE value via the
+// case-insensitive find lookup.
+func (what *STRIDE) UnmarshalJSON(data []byte) error {
+	var text string
+	if err := json.Unmarshal(data, &text); err != nil {
+		return err
+	}
+	parsed, err := what.find(text)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// MarshalYAML serializes the STRIDE value as its name string.
+func (what STRIDE) MarshalYAML() (interface{}, error) {
+	return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a STRIDE value via the
+// case-insensitive find lookup.
+func (what *STRIDE) UnmarshalYAML(node *yaml.Node) error {
+	parsed, err := what.find(node.Value)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// find resolves a name to its STRIDE value by case-insensitive comparison
+// against the serialized names; unknown names yield an error.
+func (what STRIDE) find(value string) (STRIDE, error) {
+	for index := range StrideTypeDescription {
+		if strings.EqualFold(value, StrideTypeDescription[index].Name) {
+			return STRIDE(index), nil
+		}
+	}
+	return STRIDE(0), fmt.Errorf("unknown STRIDE value %q", value)
+}
diff --git a/pkg/security/types/stride_test.go b/pkg/security/types/stride_test.go
new file mode 100644
index 00000000..9f10059e
--- /dev/null
+++ b/pkg/security/types/stride_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseStrideTest is one table-driven case for TestParseStride: the raw input
+// string, the expected parsed value, and the expected error (nil on success).
+type ParseStrideTest struct {
+	input         string
+	expected      STRIDE
+	expectedError error
+}
+
+// TestParseStride verifies that each serialized STRIDE name parses to its
+// enum value and that an unknown name yields the parse error verbatim.
+func TestParseStride(t *testing.T) {
+	testCases := map[string]ParseStrideTest{
+		"spoofing": {
+			input:    "spoofing",
+			expected: Spoofing,
+		},
+		"tampering": {
+			input:    "tampering",
+			expected: Tampering,
+		},
+		"repudiation": {
+			input:    "repudiation",
+			expected: Repudiation,
+		},
+		"information-disclosure": {
+			input:    "information-disclosure",
+			expected: InformationDisclosure,
+		},
+		"denial-of-service": {
+			input:    "denial-of-service",
+			expected: DenialOfService,
+		},
+		"elevation-of-privilege": {
+			input:    "elevation-of-privilege",
+			expected: ElevationOfPrivilege,
+		},
+		"unknown": {
+			input:         "unknown",
+			expectedError: errors.New("Unable to parse into type: unknown"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := ParseSTRIDE(testCase.input)
+
+			assert.Equal(t, testCase.expected, actual)
+			assert.Equal(t, testCase.expectedError, err)
+		})
+	}
+}
diff --git a/pkg/security/types/technical_asset.go b/pkg/security/types/technical_asset.go
new file mode 100644
index 00000000..eda65cca
--- /dev/null
+++ b/pkg/security/types/technical_asset.go
@@ -0,0 +1,352 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "fmt"
+ "sort"
+)
+
+// TechnicalAsset is one modeled system component (server, application,
+// client, ...) with its CIA ratings, technology classification, scope flags,
+// and outgoing communication links, as parsed from the model input.
+type TechnicalAsset struct {
+	Id                      string                   `json:"id,omitempty" yaml:"id,omitempty"`
+	Title                   string                   `json:"title,omitempty" yaml:"title,omitempty"`
+	Description             string                   `json:"description,omitempty" yaml:"description,omitempty"`
+	Usage                   Usage                    `json:"usage,omitempty" yaml:"usage,omitempty"`
+	Type                    TechnicalAssetType       `json:"type,omitempty" yaml:"type,omitempty"`
+	Size                    TechnicalAssetSize       `json:"size,omitempty" yaml:"size,omitempty"`
+	Technology              TechnicalAssetTechnology `json:"technology,omitempty" yaml:"technology,omitempty"`
+	Machine                 TechnicalAssetMachine    `json:"machine,omitempty" yaml:"machine,omitempty"`
+	Internet                bool                     `json:"internet,omitempty" yaml:"internet,omitempty"`
+	MultiTenant             bool                     `json:"multi_tenant,omitempty" yaml:"multi_tenant,omitempty"`
+	Redundant               bool                     `json:"redundant,omitempty" yaml:"redundant,omitempty"`
+	CustomDevelopedParts    bool                     `json:"custom_developed_parts,omitempty" yaml:"custom_developed_parts,omitempty"`
+	OutOfScope              bool                     `json:"out_of_scope,omitempty" yaml:"out_of_scope,omitempty"`
+	UsedAsClientByHuman     bool                     `json:"used_as_client_by_human,omitempty" yaml:"used_as_client_by_human,omitempty"`
+	Encryption              EncryptionStyle          `json:"encryption,omitempty" yaml:"encryption,omitempty"`
+	JustificationOutOfScope string                   `json:"justification_out_of_scope,omitempty" yaml:"justification_out_of_scope,omitempty"`
+	Owner                   string                   `json:"owner,omitempty" yaml:"owner,omitempty"`
+	Confidentiality         Confidentiality          `json:"confidentiality,omitempty" yaml:"confidentiality,omitempty"`
+	Integrity               Criticality              `json:"integrity,omitempty" yaml:"integrity,omitempty"`
+	Availability            Criticality              `json:"availability,omitempty" yaml:"availability,omitempty"`
+	JustificationCiaRating  string                   `json:"justification_cia_rating,omitempty" yaml:"justification_cia_rating,omitempty"`
+	Tags                    []string                 `json:"tags,omitempty" yaml:"tags,omitempty"`
+	DataAssetsProcessed     []string                 `json:"data_assets_processed,omitempty" yaml:"data_assets_processed,omitempty"`
+	DataAssetsStored        []string                 `json:"data_assets_stored,omitempty" yaml:"data_assets_stored,omitempty"`
+	DataFormatsAccepted     []DataFormat             `json:"data_formats_accepted,omitempty" yaml:"data_formats_accepted,omitempty"`
+	CommunicationLinks      []CommunicationLink      `json:"communication_links,omitempty" yaml:"communication_links,omitempty"`
+	DiagramTweakOrder       int                      `json:"diagram_tweak_order,omitempty" yaml:"diagram_tweak_order,omitempty"`
+	// will be set by separate calculation step:
+	RAA float64 `json:"raa,omitempty" yaml:"raa,omitempty"`
+}
+
+// IsTaggedWithAny reports whether the asset carries any of the given tags
+// (case-insensitive).
+func (what TechnicalAsset) IsTaggedWithAny(tags ...string) bool {
+	return containsCaseInsensitiveAny(what.Tags, tags...)
+}
+
+// IsTaggedWithBaseTag reports whether any of the asset's tags matches the
+// given base tag; delegates to the package-level IsTaggedWithBaseTag helper.
+func (what TechnicalAsset) IsTaggedWithBaseTag(baseTag string) bool {
+	return IsTaggedWithBaseTag(what.Tags, baseTag)
+}
+
+// first use the tag(s) of the asset itself, then their trust boundaries (recursively up) and then their shared runtime
+
+// IsTaggedWithAnyTraversingUp reports whether the asset itself, any trust
+// boundary containing it (walking upwards recursively), or any shared runtime
+// it runs on carries one of the given tags.
+func (what TechnicalAsset) IsTaggedWithAnyTraversingUp(model *ParsedModel, tags ...string) bool {
+	if containsCaseInsensitiveAny(what.Tags, tags...) {
+		return true
+	}
+	if boundaryID := what.GetTrustBoundaryId(model); boundaryID != "" {
+		if model.TrustBoundaries[boundaryID].IsTaggedWithAnyTraversingUp(model, tags...) {
+			return true
+		}
+	}
+	for _, runtime := range model.SharedRuntimes {
+		if contains(runtime.TechnicalAssetsRunning, what.Id) && runtime.IsTaggedWithAny(tags...) {
+			return true
+		}
+	}
+	return false
+}
+
+// IsSameTrustBoundary reports whether this asset and the other asset are
+// directly contained in the same trust boundary.
+func (what TechnicalAsset) IsSameTrustBoundary(parsedModel *ParsedModel, otherAssetId string) bool {
+	trustBoundaryOfMyAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
+	trustBoundaryOfOtherAsset := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
+	return trustBoundaryOfMyAsset.Id == trustBoundaryOfOtherAsset.Id
+}
+
+// IsSameExecutionEnvironment reports whether both assets sit in the very same
+// execution-environment trust boundary; false when either boundary is of a
+// different type.
+func (what TechnicalAsset) IsSameExecutionEnvironment(parsedModel *ParsedModel, otherAssetId string) bool {
+	myBoundary := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
+	otherBoundary := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
+	if myBoundary.Type != ExecutionEnvironment || otherBoundary.Type != ExecutionEnvironment {
+		return false
+	}
+	return myBoundary.Id == otherBoundary.Id
+}
+
+// IsSameTrustBoundaryNetworkOnly reports whether both assets share the same
+// network trust boundary; non-network boundaries (execution environments) are
+// transparent here, so their enclosing parent boundary is used instead.
+func (what TechnicalAsset) IsSameTrustBoundaryNetworkOnly(parsedModel *ParsedModel, otherAssetId string) bool {
+	myBoundary := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[what.Id]
+	if !myBoundary.Type.IsNetworkBoundary() { // find and use the parent boundary then
+		myBoundary = parsedModel.TrustBoundaries[myBoundary.ParentTrustBoundaryID(parsedModel)]
+	}
+	otherBoundary := parsedModel.DirectContainingTrustBoundaryMappedByTechnicalAssetId[otherAssetId]
+	if !otherBoundary.Type.IsNetworkBoundary() { // find and use the parent boundary then
+		otherBoundary = parsedModel.TrustBoundaries[otherBoundary.ParentTrustBoundaryID(parsedModel)]
+	}
+	return myBoundary.Id == otherBoundary.Id
+}
+
+// HighestSensitivityScore sums the attacker-attractiveness contributions of
+// the asset's own confidentiality, integrity, and availability ratings into a
+// single sensitivity score.
+func (what TechnicalAsset) HighestSensitivityScore() float64 {
+	return what.Confidentiality.AttackerAttractivenessForAsset() +
+		what.Integrity.AttackerAttractivenessForAsset() +
+		what.Availability.AttackerAttractivenessForAsset()
+}
+
+// HighestConfidentiality returns the maximum of the asset's own
+// confidentiality and that of every data asset it processes.
+func (what TechnicalAsset) HighestConfidentiality(parsedModel *ParsedModel) Confidentiality {
+	highest := what.Confidentiality
+	for _, dataAssetID := range what.DataAssetsProcessed {
+		if candidate := parsedModel.DataAssets[dataAssetID].Confidentiality; candidate > highest {
+			highest = candidate
+		}
+	}
+	return highest
+}
+
+// DataAssetsProcessedSorted resolves the processed data asset IDs and returns
+// the data assets sorted by title.
+func (what TechnicalAsset) DataAssetsProcessedSorted(parsedModel *ParsedModel) []DataAsset {
+	result := make([]DataAsset, 0, len(what.DataAssetsProcessed)) // pre-size: final length is known
+	for _, assetID := range what.DataAssetsProcessed {
+		result = append(result, parsedModel.DataAssets[assetID])
+	}
+	sort.Sort(ByDataAssetTitleSort(result))
+	return result
+}
+
+// DataAssetsStoredSorted resolves the stored data asset IDs and returns the
+// data assets sorted by title.
+func (what TechnicalAsset) DataAssetsStoredSorted(parsedModel *ParsedModel) []DataAsset {
+	result := make([]DataAsset, 0, len(what.DataAssetsStored)) // pre-size: final length is known
+	for _, assetID := range what.DataAssetsStored {
+		result = append(result, parsedModel.DataAssets[assetID])
+	}
+	sort.Sort(ByDataAssetTitleSort(result))
+	return result
+}
+
+// DataFormatsAcceptedSorted returns a sorted copy of the accepted data
+// formats; make+copy replaces the original element-by-element append loop so
+// the caller-visible slice is detached from the struct's slice in one step.
+func (what TechnicalAsset) DataFormatsAcceptedSorted() []DataFormat {
+	result := make([]DataFormat, len(what.DataFormatsAccepted))
+	copy(result, what.DataFormatsAccepted)
+	sort.Sort(ByDataFormatAcceptedSort(result))
+	return result
+}
+
+// CommunicationLinksSorted returns a copy of the outgoing communication links
+// sorted by title.
+func (what TechnicalAsset) CommunicationLinksSorted() []CommunicationLink {
+	result := make([]CommunicationLink, len(what.CommunicationLinks))
+	copy(result, what.CommunicationLinks)
+	sort.Sort(ByTechnicalCommunicationLinkTitleSort(result))
+	return result
+}
+
+// HighestIntegrity returns the maximum of the asset's own integrity and that
+// of every data asset it processes.
+func (what TechnicalAsset) HighestIntegrity(model *ParsedModel) Criticality {
+	highest := what.Integrity
+	for _, dataAssetID := range what.DataAssetsProcessed {
+		if candidate := model.DataAssets[dataAssetID].Integrity; candidate > highest {
+			highest = candidate
+		}
+	}
+	return highest
+}
+
+// HighestAvailability returns the maximum of the asset's own availability and
+// that of every data asset it processes.
+func (what TechnicalAsset) HighestAvailability(model *ParsedModel) Criticality {
+	highest := what.Availability
+	for _, dataAssetID := range what.DataAssetsProcessed {
+		if candidate := model.DataAssets[dataAssetID].Availability; candidate > highest {
+			highest = candidate
+		}
+	}
+	return highest
+}
+
+// HasDirectConnection reports whether a communication link exists between
+// this asset and the other asset, in either direction.
+func (what TechnicalAsset) HasDirectConnection(parsedModel *ParsedModel, otherAssetId string) bool {
+	// check both directions, hence two times, just reversed
+	for _, pair := range [][2]string{{what.Id, otherAssetId}, {otherAssetId, what.Id}} {
+		for _, dataFlow := range parsedModel.IncomingTechnicalCommunicationLinksMappedByTargetId[pair[0]] {
+			if dataFlow.SourceId == pair[1] {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// GeneratedRisks collects all generated risks whose most relevant technical
+// asset is this one, iterating categories in sorted order, then sorts the
+// result by severity.
+func (what TechnicalAsset) GeneratedRisks(parsedModel *ParsedModel) []Risk {
+	resultingRisks := make([]Risk, 0)
+	// hoisted: the original computed SortedRiskCategories once for the
+	// emptiness check and a second time for the loop
+	categories := SortedRiskCategories(parsedModel)
+	if len(categories) == 0 {
+		fmt.Println("Uh, strange, no risks generated (yet?) and asking for them by tech asset...")
+	}
+	for _, category := range categories {
+		for _, risk := range SortedRisksOfCategory(parsedModel, category) {
+			if risk.MostRelevantTechnicalAssetId == what.Id {
+				resultingRisks = append(resultingRisks, risk)
+			}
+		}
+	}
+	SortByRiskSeverity(resultingRisks, parsedModel)
+	return resultingRisks
+}
+
+/*
+func (what TechnicalAsset) HighestRiskSeverity() RiskSeverity {
+ highest := Low
+ for _, risk := range what.GeneratedRisks() {
+ if risk.Severity > highest {
+ highest = risk.Severity
+ }
+ }
+ return highest
+}
+*/
+
+// IsZero reports whether this is the zero-value TechnicalAsset (no Id set).
+func (what TechnicalAsset) IsZero() bool {
+	return len(what.Id) == 0
+}
+
+// ProcessesOrStoresDataAsset reports whether the given data asset is handled
+// by this technical asset.
+// NOTE(review): only DataAssetsProcessed is checked despite "OrStores" in the
+// name — presumably the parser folds stored assets into the processed list;
+// verify against the model parsing step.
+func (what TechnicalAsset) ProcessesOrStoresDataAsset(dataAssetId string) bool {
+	return contains(what.DataAssetsProcessed, dataAssetId)
+}
+
+/*
+// Loops over all data assets (stored and processed by this technical asset) and determines for each
+// data asset, how many percentage of the data risk is reduced when this technical asset has all risks mitigated.
+// Example: This means if the data asset is loosing a risk and thus getting from red to amber it counts as 1.
+// Other example: When only one out of four lines (see data risk mapping) leading to red tech assets are removed by
+// the mitigations, then this counts as 0.25. The overall sum is returned.
+func (what TechnicalAsset) QuickWins() float64 {
+ result := 0.0
+ uniqueDataAssetsStoredAndProcessed := make(map[string]interface{})
+ for _, dataAssetId := range what.DataAssetsStored {
+ uniqueDataAssetsStoredAndProcessed[dataAssetId] = true
+ }
+ for _, dataAssetId := range what.DataAssetsProcessed {
+ uniqueDataAssetsStoredAndProcessed[dataAssetId] = true
+ }
+ highestSeverity := HighestSeverityStillAtRisk(what.GeneratedRisks())
+ for dataAssetId, _ := range uniqueDataAssetsStoredAndProcessed {
+ dataAsset := ParsedModelRoot.DataAssets[dataAssetId]
+ if dataAsset.IdentifiedRiskSeverityStillAtRisk() <= highestSeverity {
+ howManySameLevelCausingUsagesOfThisData := 0.0
+ for techAssetId, risks := range dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId() {
+ if !ParsedModelRoot.TechnicalAssets[techAssetId].OutOfScope {
+ for _, risk := range risks {
+ if len(risk.MostRelevantTechnicalAssetId) > 0 { // T O D O caching of generated risks inside the method?
+ if HighestSeverityStillAtRisk(ParsedModelRoot.TechnicalAssets[risk.MostRelevantTechnicalAssetId].GeneratedRisks()) == highestSeverity {
+ howManySameLevelCausingUsagesOfThisData++
+ break
+ }
+ }
+ }
+ }
+ }
+ if howManySameLevelCausingUsagesOfThisData > 0 {
+ result += 1.0 / howManySameLevelCausingUsagesOfThisData
+ }
+ }
+ }
+ return result
+}
+*/
+
+// GetTrustBoundaryId returns the ID of the trust boundary directly listing
+// this asset inside it, or the empty string when no boundary contains it.
+func (what TechnicalAsset) GetTrustBoundaryId(model *ParsedModel) string {
+	for _, trustBoundary := range model.TrustBoundaries {
+		if contains(trustBoundary.TechnicalAssetsInside, what.Id) {
+			return trustBoundary.Id
+		}
+	}
+	return ""
+}
+
+// SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk sorts assets by the
+// highest severity of their still-at-risk risks (descending), breaking ties
+// by title; out-of-scope assets sink to the end (ordered by title among
+// themselves).
+func SortByTechnicalAssetRiskSeverityAndTitleStillAtRisk(assets []TechnicalAsset, parsedModel *ParsedModel) {
+	sort.Slice(assets, func(i, j int) bool {
+		risksLeft := ReduceToOnlyStillAtRisk(parsedModel, assets[i].GeneratedRisks(parsedModel))
+		risksRight := ReduceToOnlyStillAtRisk(parsedModel, assets[j].GeneratedRisks(parsedModel))
+		highestSeverityLeft := HighestSeverityStillAtRisk(parsedModel, risksLeft)
+		highestSeverityRight := HighestSeverityStillAtRisk(parsedModel, risksRight)
+		var result bool
+		if highestSeverityLeft == highestSeverityRight {
+			// equal severity: an asset with no remaining risks sorts after one with some
+			// NOTE(review): these early returns skip the out-of-scope override
+			// below — confirm that is intended.
+			if len(risksLeft) == 0 && len(risksRight) > 0 {
+				return false
+			} else if len(risksLeft) > 0 && len(risksRight) == 0 {
+				return true
+			} else {
+				result = assets[i].Title < assets[j].Title
+			}
+		} else {
+			result = highestSeverityLeft > highestSeverityRight
+		}
+		// out-of-scope assets override the severity ordering and sink to the end
+		if assets[i].OutOfScope && assets[j].OutOfScope {
+			result = assets[i].Title < assets[j].Title
+		} else if assets[i].OutOfScope {
+			result = false
+		} else if assets[j].OutOfScope {
+			result = true
+		}
+		return result
+	})
+}
+
+// ByTechnicalAssetRAAAndTitleSort orders assets by RAA descending, breaking
+// ties by title ascending.
+type ByTechnicalAssetRAAAndTitleSort []TechnicalAsset
+
+func (what ByTechnicalAssetRAAAndTitleSort) Len() int      { return len(what) }
+func (what ByTechnicalAssetRAAAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTechnicalAssetRAAAndTitleSort) Less(i, j int) bool {
+	raaLeft := what[i].RAA
+	raaRight := what[j].RAA
+	if raaLeft == raaRight {
+		return what[i].Title < what[j].Title
+	}
+	return raaLeft > raaRight
+}
+
+/*
+type ByTechnicalAssetQuickWinsAndTitleSort []TechnicalAsset
+
+func (what ByTechnicalAssetQuickWinsAndTitleSort) Len() int { return len(what) }
+func (what ByTechnicalAssetQuickWinsAndTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTechnicalAssetQuickWinsAndTitleSort) Less(i, j int) bool {
+ qwLeft := what[i].QuickWins()
+ qwRight := what[j].QuickWins()
+ if qwLeft == qwRight {
+ return what[i].Title < what[j].Title
+ }
+ return qwLeft > qwRight
+}
+*/
+
+// ByTechnicalAssetTitleSort orders assets alphabetically by title.
+type ByTechnicalAssetTitleSort []TechnicalAsset
+
+func (what ByTechnicalAssetTitleSort) Len() int      { return len(what) }
+func (what ByTechnicalAssetTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTechnicalAssetTitleSort) Less(i, j int) bool {
+	return what[i].Title < what[j].Title
+}
+
+// ByOrderAndIdSort orders assets by their diagram tweak order, breaking ties
+// by Id.
+// NOTE(review): the tie-break compares Id with '>' (descending) — confirm the
+// reversed order is intended for diagram layout.
+type ByOrderAndIdSort []TechnicalAsset
+
+func (what ByOrderAndIdSort) Len() int      { return len(what) }
+func (what ByOrderAndIdSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByOrderAndIdSort) Less(i, j int) bool {
+	if what[i].DiagramTweakOrder == what[j].DiagramTweakOrder {
+		return what[i].Id > what[j].Id
+	}
+	return what[i].DiagramTweakOrder < what[j].DiagramTweakOrder
+}
diff --git a/pkg/security/types/technical_asset_machine.go b/pkg/security/types/technical_asset_machine.go
new file mode 100644
index 00000000..f8463805
--- /dev/null
+++ b/pkg/security/types/technical_asset_machine.go
@@ -0,0 +1,100 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// TechnicalAssetMachine describes how a technical asset is hosted.
+type TechnicalAssetMachine int
+
+const (
+	Physical TechnicalAssetMachine = iota
+	Virtual
+	Container
+	Serverless
+)
+
+// TechnicalAssetMachineValues returns all machine values in declaration
+// order, boxed as TypeEnum for generic enum handling.
+func TechnicalAssetMachineValues() []TypeEnum {
+	return []TypeEnum{
+		Physical,
+		Virtual,
+		Container,
+		Serverless,
+	}
+}
+
+// TechnicalAssetMachineTypeDescription maps each machine value (by index) to
+// its serialized name and description; order must match the const block above.
+var TechnicalAssetMachineTypeDescription = [...]TypeDescription{
+	{"physical", "A physical machine"},
+	{"virtual", "A virtual machine"},
+	{"container", "A container"},
+	{"serverless", "A serverless application"},
+}
+
+// ParseTechnicalAssetMachine maps a serialized name (e.g. "virtual",
+// surrounding whitespace ignored) onto its TechnicalAssetMachine value.
+func ParseTechnicalAssetMachine(value string) (technicalAssetMachine TechnicalAssetMachine, err error) {
+	trimmed := strings.TrimSpace(value)
+	for index, description := range TechnicalAssetMachineTypeDescription {
+		if description.Name == trimmed {
+			return TechnicalAssetMachine(index), nil
+		}
+	}
+	return technicalAssetMachine, errors.New("Unable to parse into type: " + trimmed)
+}
+
+// String returns the serialized name of the machine value (e.g. "virtual").
+func (what TechnicalAssetMachine) String() string {
+	return TechnicalAssetMachineTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the machine value.
+func (what TechnicalAssetMachine) Explain() string {
+	return TechnicalAssetMachineTypeDescription[what].Description
+}
+
+// MarshalJSON serializes the value as its name string.
+func (what TechnicalAssetMachine) MarshalJSON() ([]byte, error) {
+	return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a TechnicalAssetMachine value via
+// the case-insensitive find lookup.
+func (what *TechnicalAssetMachine) UnmarshalJSON(data []byte) error {
+	var text string
+	if err := json.Unmarshal(data, &text); err != nil {
+		return err
+	}
+	parsed, err := what.find(text)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// MarshalYAML serializes the value as its name string.
+func (what TechnicalAssetMachine) MarshalYAML() (interface{}, error) {
+	return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a TechnicalAssetMachine value
+// via the case-insensitive find lookup.
+func (what *TechnicalAssetMachine) UnmarshalYAML(node *yaml.Node) error {
+	parsed, err := what.find(node.Value)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// find resolves a name case-insensitively to its TechnicalAssetMachine value;
+// unknown names yield an error.
+func (what TechnicalAssetMachine) find(value string) (TechnicalAssetMachine, error) {
+	for index := range TechnicalAssetMachineTypeDescription {
+		if strings.EqualFold(value, TechnicalAssetMachineTypeDescription[index].Name) {
+			return TechnicalAssetMachine(index), nil
+		}
+	}
+	return TechnicalAssetMachine(0), fmt.Errorf("unknown technical asset machine value %q", value)
+}
diff --git a/pkg/security/types/technical_asset_machine_test.go b/pkg/security/types/technical_asset_machine_test.go
new file mode 100644
index 00000000..3f53c685
--- /dev/null
+++ b/pkg/security/types/technical_asset_machine_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseTechnicalAssetMachineTest is one table-driven case: the raw input, the
+// expected parsed value, and the expected error (nil on success).
+type ParseTechnicalAssetMachineTest struct {
+	input         string
+	expected      TechnicalAssetMachine
+	expectedError error
+}
+
+// TestParseTechnicalAssetMachine verifies that each serialized machine name
+// parses to its enum value and that an unknown name yields the parse error.
+func TestParseTechnicalAssetMachine(t *testing.T) {
+	testCases := map[string]ParseTechnicalAssetMachineTest{
+		"physical": {
+			input:    "physical",
+			expected: Physical,
+		},
+		"virtual": {
+			input:    "virtual",
+			expected: Virtual,
+		},
+		"container": {
+			input:    "container",
+			expected: Container,
+		},
+		"serverless": {
+			input:    "serverless",
+			expected: Serverless,
+		},
+		"unknown": {
+			input:         "unknown",
+			expectedError: errors.New("Unable to parse into type: unknown"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := ParseTechnicalAssetMachine(testCase.input)
+
+			assert.Equal(t, testCase.expected, actual)
+			assert.Equal(t, testCase.expectedError, err)
+		})
+	}
+}
diff --git a/pkg/security/types/technical_asset_size.go b/pkg/security/types/technical_asset_size.go
new file mode 100644
index 00000000..65ecbc95
--- /dev/null
+++ b/pkg/security/types/technical_asset_size.go
@@ -0,0 +1,101 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// TechnicalAssetSize describes the granularity of a technical asset, from a
+// whole system down to a single component.
+type TechnicalAssetSize int
+
+const (
+	System TechnicalAssetSize = iota
+	Service
+	Application
+	Component
+)
+
+// TechnicalAssetSizeValues returns all size values in declaration order,
+// boxed as TypeEnum for generic enum handling.
+func TechnicalAssetSizeValues() []TypeEnum {
+	return []TypeEnum{
+		System,
+		Service,
+		Application,
+		Component,
+	}
+}
+
+// TechnicalAssetSizeDescription maps each size value (by index) to its
+// serialized name and description; order must match the const block above.
+var TechnicalAssetSizeDescription = [...]TypeDescription{
+	{"system", "A system consists of several services"},
+	{"service", "A specific service (web, mail, ...)"},
+	{"application", "A single application"},
+	{"component", "A component of an application (smaller unit like a microservice)"},
+}
+
+// String returns the serialized name of the size value (e.g. "service").
+func (what TechnicalAssetSize) String() string {
+	// NOTE: maintain list also in schema.json for validation in IDEs
+	return TechnicalAssetSizeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the size value.
+func (what TechnicalAssetSize) Explain() string {
+	return TechnicalAssetSizeDescription[what].Description
+}
+
+// ParseTechnicalAssetSize maps a serialized name (e.g. "service",
+// surrounding whitespace ignored) onto its TechnicalAssetSize value.
+func ParseTechnicalAssetSize(value string) (technicalAssetSize TechnicalAssetSize, err error) {
+	trimmed := strings.TrimSpace(value)
+	for index, description := range TechnicalAssetSizeDescription {
+		if description.Name == trimmed {
+			return TechnicalAssetSize(index), nil
+		}
+	}
+	return technicalAssetSize, errors.New("Unable to parse into type: " + trimmed)
+}
+
+// MarshalJSON serializes the value as its name string.
+func (what TechnicalAssetSize) MarshalJSON() ([]byte, error) {
+	return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string into a TechnicalAssetSize value via the
+// case-insensitive find lookup.
+func (what *TechnicalAssetSize) UnmarshalJSON(data []byte) error {
+	var text string
+	if err := json.Unmarshal(data, &text); err != nil {
+		return err
+	}
+	parsed, err := what.find(text)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// MarshalYAML serializes the value as its name string.
+func (what TechnicalAssetSize) MarshalYAML() (interface{}, error) {
+	return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar node into a TechnicalAssetSize value via
+// the case-insensitive find lookup.
+func (what *TechnicalAssetSize) UnmarshalYAML(node *yaml.Node) error {
+	parsed, err := what.find(node.Value)
+	if err != nil {
+		return err
+	}
+	*what = parsed
+	return nil
+}
+
+// find resolves a name case-insensitively to its TechnicalAssetSize value;
+// unknown names yield an error.
+func (what TechnicalAssetSize) find(value string) (TechnicalAssetSize, error) {
+	for index := range TechnicalAssetSizeDescription {
+		if strings.EqualFold(value, TechnicalAssetSizeDescription[index].Name) {
+			return TechnicalAssetSize(index), nil
+		}
+	}
+	return TechnicalAssetSize(0), fmt.Errorf("unknown technical asset size value %q", value)
+}
diff --git a/pkg/security/types/technical_asset_size_test.go b/pkg/security/types/technical_asset_size_test.go
new file mode 100644
index 00000000..62d34591
--- /dev/null
+++ b/pkg/security/types/technical_asset_size_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseTechnicalAssetSizeTest is one table-driven case: the raw input, the
+// expected parsed value, and the expected error (nil on success).
+type ParseTechnicalAssetSizeTest struct {
+	input         string
+	expected      TechnicalAssetSize
+	expectedError error
+}
+
+// TestParseTechnicalAssetSize verifies that each serialized size name parses
+// to its enum value and that an unknown name yields the parse error.
+func TestParseTechnicalAssetSize(t *testing.T) {
+	testCases := map[string]ParseTechnicalAssetSizeTest{
+		"service": {
+			input:    "service",
+			expected: Service,
+		},
+		"system": {
+			input:    "system",
+			expected: System,
+		},
+		"application": {
+			input:    "application",
+			expected: Application,
+		},
+		"component": {
+			input:    "component",
+			expected: Component,
+		},
+		"unknown": {
+			input:         "unknown",
+			expectedError: errors.New("Unable to parse into type: unknown"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := ParseTechnicalAssetSize(testCase.input)
+
+			assert.Equal(t, testCase.expected, actual)
+			assert.Equal(t, testCase.expectedError, err)
+		})
+	}
+}
diff --git a/pkg/security/types/technical_asset_technology.go b/pkg/security/types/technical_asset_technology.go
new file mode 100644
index 00000000..5083cf88
--- /dev/null
+++ b/pkg/security/types/technical_asset_technology.go
@@ -0,0 +1,339 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// TechnicalAssetTechnology classifies what kind of technology a technical
+// asset is (client, server, datastore, infrastructure, ...); several risk
+// rules key off these categories.
+type TechnicalAssetTechnology int
+
+const (
+	UnknownTechnology TechnicalAssetTechnology = iota
+	ClientSystem
+	Browser
+	Desktop
+	MobileApp
+	DevOpsClient
+	WebServer
+	WebApplication
+	ApplicationServer
+	Database
+	FileServer
+	LocalFileSystem
+	ERP
+	CMS
+	WebServiceREST
+	WebServiceSOAP
+	EJB
+	SearchIndex
+	SearchEngine
+	ServiceRegistry
+	ReverseProxy
+	LoadBalancer
+	BuildPipeline
+	SourcecodeRepository
+	ArtifactRegistry
+	CodeInspectionPlatform
+	Monitoring
+	LDAPServer
+	ContainerPlatform
+	BatchProcessing
+	EventListener
+	IdentityProvider
+	IdentityStoreLDAP
+	IdentityStoreDatabase
+	Tool
+	CLI
+	Task
+	Function
+	Gateway // TODO rename to API-Gateway to be more clear?
+	IoTDevice
+	MessageQueue
+	StreamProcessing
+	ServiceMesh
+	DataLake
+	BigDataPlatform
+	ReportEngine
+	AI
+	MailServer
+	Vault
+	HSM
+	WAF
+	IDS
+	IPS
+	Scheduler
+	Mainframe
+	BlockStorage
+	Library
+)
+
+// TechnicalAssetTechnologyValues returns all technology values in declaration
+// order, boxed as TypeEnum for generic enum handling; the list must stay in
+// sync with the const block above.
+func TechnicalAssetTechnologyValues() []TypeEnum {
+	return []TypeEnum{
+		UnknownTechnology,
+		ClientSystem,
+		Browser,
+		Desktop,
+		MobileApp,
+		DevOpsClient,
+		WebServer,
+		WebApplication,
+		ApplicationServer,
+		Database,
+		FileServer,
+		LocalFileSystem,
+		ERP,
+		CMS,
+		WebServiceREST,
+		WebServiceSOAP,
+		EJB,
+		SearchIndex,
+		SearchEngine,
+		ServiceRegistry,
+		ReverseProxy,
+		LoadBalancer,
+		BuildPipeline,
+		SourcecodeRepository,
+		ArtifactRegistry,
+		CodeInspectionPlatform,
+		Monitoring,
+		LDAPServer,
+		ContainerPlatform,
+		BatchProcessing,
+		EventListener,
+		IdentityProvider,
+		IdentityStoreLDAP,
+		IdentityStoreDatabase,
+		Tool,
+		CLI,
+		Task,
+		Function,
+		Gateway,
+		IoTDevice,
+		MessageQueue,
+		StreamProcessing,
+		ServiceMesh,
+		DataLake,
+		BigDataPlatform,
+		ReportEngine,
+		AI,
+		MailServer,
+		Vault,
+		HSM,
+		WAF,
+		IDS,
+		IPS,
+		Scheduler,
+		Mainframe,
+		BlockStorage,
+		Library,
+	}
+}
+
+// TechnicalAssetTechnologyTypeDescription maps each technology value (by
+// index) to its serialized name and description; order must match the const
+// block above.
+var TechnicalAssetTechnologyTypeDescription = [...]TypeDescription{
+	{"unknown-technology", "Unknown technology"},
+	{"client-system", "A client system"},
+	{"browser", "A web browser"},
+	{"desktop", "A desktop system (or laptop)"},
+	{"mobile-app", "A mobile app (smartphone, tablet)"},
+	{"devops-client", "A client used for DevOps"},
+	{"web-server", "A web server"},
+	{"web-application", "A web application"},
+	{"application-server", "An application server (Apache Tomcat, ...)"},
+	{"database", "A database"},
+	{"file-server", "A file server"},
+	{"local-file-system", "The local file system"},
+	{"erp", "Enterprise-Resource-Planning"},
+	{"cms", "Content Management System"},
+	{"web-service-rest", "A REST web service (API)"},
+	{"web-service-soap", "A SOAP web service (API)"},
+	{"ejb", "Jakarta Enterprise Beans fka Enterprise JavaBeans"},
+	{"search-index", "The index database of a search engine"},
+	{"search-engine", "A search engine"},
+	{"service-registry", "A central place where data schemas can be found and distributed"},
+	{"reverse-proxy", "A proxy hiding internal infrastructure from caller making requests. Can also reduce load"},
+	{"load-balancer", "A load balancer directing incoming requests to available internal infrastructure"},
+	{"build-pipeline", "A software build pipeline"},
+	{"sourcecode-repository", "Git or similar"},
+	{"artifact-registry", "A registry to store build artifacts"},
+	{"code-inspection-platform", "(Static) Code Analysis)"},
+	{"monitoring", "A monitoring system (SIEM, logs)"},
+	{"ldap-server", "A LDAP server"},
+	{"container-platform", "A platform for hosting and executing containers"},
+	{"batch-processing", "A set of tools automatically processing data"},
+	{"event-listener", "An event listener waiting to be triggered and spring to action"},
+	{"identity-provider", "A authentication provider"},
+	{"identity-store-ldap", "Authentication data as LDAP"},
+	{"identity-store-database", "Authentication data as database"},
+	{"tool", "A specific tool"},
+	{"cli", "A command line tool"}, // fixed: was "threagile", which did not match the CLI constant and broke (de)serialization of the "cli" value
+	{"task", "A specific task"},
+	{"function", "A specific function (maybe RPC ?)"},
+	{"gateway", "A gateway connecting two systems or trust boundaries"},
+	{"iot-device", "An IoT device"},
+	{"message-queue", "A message queue (like MQTT)"},
+	{"stream-processing", "Data stream processing"},
+	{"service-mesh", "Infrastructure for service-to-service communication"},
+	{"data-lake", "A huge database"},
+	{"big-data-platform", "Storage for big data"},
+	{"report-engine", "Software for report generation"},
+	{"ai", "An Artificial Intelligence service"},
+	{"mail-server", "A Mail server"},
+	{"vault", "Encryption and key management"},
+	{"hsm", "Hardware Security Module"},
+	{"waf", "Web Application Firewall"},
+	{"ids", "Intrusion Detection System"},
+	{"ips", "Intrusion Prevention System"},
+	{"scheduler", "Scheduled tasks"},
+	{"mainframe", "A central, big computer"},
+	{"block-storage", "SAN or similar central file storage"},
+	{"library", "A software library"},
+}
+
+// String returns the serialized name of the technology value (e.g. "database").
+func (what TechnicalAssetTechnology) String() string {
+	// NOTE: maintain list also in schema.json for validation in IDEs
+	return TechnicalAssetTechnologyTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of the technology value.
+func (what TechnicalAssetTechnology) Explain() string {
+	return TechnicalAssetTechnologyTypeDescription[what].Description
+}
+
+// ParseTechnicalAssetTechnology maps a serialized name (e.g. "database",
+// surrounding whitespace ignored) onto its TechnicalAssetTechnology value.
+func ParseTechnicalAssetTechnology(value string) (technicalAssetTechnology TechnicalAssetTechnology, err error) {
+	trimmed := strings.TrimSpace(value)
+	for index, description := range TechnicalAssetTechnologyTypeDescription {
+		if description.Name == trimmed {
+			return TechnicalAssetTechnology(index), nil
+		}
+	}
+	return technicalAssetTechnology, errors.New("Unable to parse into type: " + trimmed)
+}
+
+// IsWebApplication reports whether this technology serves a web user
+// interface (servers, app containers, CMS, identity provider, reporting).
+func (what TechnicalAssetTechnology) IsWebApplication() bool {
+ switch what {
+ case WebServer, WebApplication, ApplicationServer, ERP, CMS, IdentityProvider, ReportEngine:
+ return true
+ }
+ return false
+}
+
+// IsWebService reports whether this technology is a REST or SOAP web service.
+func (what TechnicalAssetTechnology) IsWebService() bool {
+ switch what {
+ case WebServiceREST, WebServiceSOAP:
+ return true
+ }
+ return false
+}
+
+// IsIdentityRelated reports whether this technology provides or stores identities.
+func (what TechnicalAssetTechnology) IsIdentityRelated() bool {
+ switch what {
+ case IdentityProvider, IdentityStoreLDAP, IdentityStoreDatabase:
+ return true
+ }
+ return false
+}
+
+// IsSecurityControlRelated reports whether this technology is itself a
+// security control (key management or network defense gear).
+func (what TechnicalAssetTechnology) IsSecurityControlRelated() bool {
+ switch what {
+ case Vault, HSM, WAF, IDS, IPS:
+ return true
+ }
+ return false
+}
+
+// IsUnprotectedCommunicationsTolerated reports whether unprotected links to
+// this technology are acceptable (passive monitoring / inspection gear).
+func (what TechnicalAssetTechnology) IsUnprotectedCommunicationsTolerated() bool {
+ switch what {
+ case Monitoring, IDS, IPS:
+ return true
+ }
+ return false
+}
+
+// IsUnnecessaryDataTolerated reports whether this technology may legitimately
+// see data it does not strictly need (passive monitoring / inspection gear).
+func (what TechnicalAssetTechnology) IsUnnecessaryDataTolerated() bool {
+ switch what {
+ case Monitoring, IDS, IPS:
+ return true
+ }
+ return false
+}
+
+// IsCloseToHighValueTargetsTolerated reports whether proximity to high-value
+// assets is acceptable for this technology (infrastructure and monitoring roles).
+func (what TechnicalAssetTechnology) IsCloseToHighValueTargetsTolerated() bool {
+ switch what {
+ case Monitoring, IDS, IPS, LoadBalancer, ReverseProxy:
+ return true
+ }
+ return false
+}
+
+// IsClient reports whether this technology is an end-user- or operator-facing client.
+func (what TechnicalAssetTechnology) IsClient() bool {
+ switch what {
+ case ClientSystem, Browser, Desktop, MobileApp, DevOpsClient, IoTDevice:
+ return true
+ }
+ return false
+}
+
+// IsUsuallyAbleToPropagateIdentityToOutgoingTargets reports whether this
+// technology typically forwards a caller identity on its outgoing calls.
+func (what TechnicalAssetTechnology) IsUsuallyAbleToPropagateIdentityToOutgoingTargets() bool {
+ switch what {
+ case ClientSystem, Browser, Desktop, MobileApp,
+ DevOpsClient, WebServer, WebApplication, ApplicationServer, ERP,
+ CMS, WebServiceREST, WebServiceSOAP, EJB,
+ SearchEngine, ReverseProxy, LoadBalancer, IdentityProvider,
+ Tool, CLI, Task, Function, Gateway,
+ IoTDevice, MessageQueue, ServiceMesh, ReportEngine, WAF, Library:
+ return true
+ }
+ return false
+}
+
+// IsLessProtectedType reports whether this technology is typically deployed
+// with weaker hardening than core backend/security infrastructure.
+func (what TechnicalAssetTechnology) IsLessProtectedType() bool {
+ switch what {
+ case ClientSystem, Browser, Desktop, MobileApp, DevOpsClient, WebServer, WebApplication, ApplicationServer, CMS,
+ WebServiceREST, WebServiceSOAP, EJB, BuildPipeline, SourcecodeRepository,
+ ArtifactRegistry, CodeInspectionPlatform, Monitoring, IoTDevice, AI, MailServer, Scheduler,
+ Mainframe:
+ return true
+ }
+ return false
+}
+
+// IsUsuallyProcessingEndUserRequests reports whether this technology normally
+// handles requests originating from end users.
+func (what TechnicalAssetTechnology) IsUsuallyProcessingEndUserRequests() bool {
+ switch what {
+ case WebServer, WebApplication, ApplicationServer, ERP, WebServiceREST, WebServiceSOAP, EJB, ReportEngine:
+ return true
+ }
+ return false
+}
+
+// IsUsuallyStoringEndUserData reports whether this technology normally
+// persists end-user data.
+func (what TechnicalAssetTechnology) IsUsuallyStoringEndUserData() bool {
+ switch what {
+ case Database, ERP, FileServer, LocalFileSystem, BlockStorage, MailServer, StreamProcessing, MessageQueue:
+ return true
+ }
+ return false
+}
+
+// IsExclusivelyFrontendRelated reports whether this technology belongs only
+// to the frontend/edge side of an architecture.
+func (what TechnicalAssetTechnology) IsExclusivelyFrontendRelated() bool {
+ switch what {
+ case ClientSystem, Browser, Desktop, MobileApp, DevOpsClient, CMS, ReverseProxy, WAF, LoadBalancer, Gateway, IoTDevice:
+ return true
+ }
+ return false
+}
+
+// IsExclusivelyBackendRelated reports whether this technology belongs only
+// to the backend side of an architecture.
+func (what TechnicalAssetTechnology) IsExclusivelyBackendRelated() bool {
+ switch what {
+ case Database, IdentityProvider, IdentityStoreLDAP, IdentityStoreDatabase, ERP, WebServiceREST, WebServiceSOAP, EJB, SearchIndex,
+ SearchEngine, ContainerPlatform, BatchProcessing, EventListener, DataLake, BigDataPlatform, MessageQueue,
+ StreamProcessing, ServiceMesh, Vault, HSM, Scheduler, Mainframe, FileServer, BlockStorage:
+ return true
+ }
+ return false
+}
+
+// IsDevelopmentRelevant reports whether this technology is part of the
+// software development / delivery toolchain.
+func (what TechnicalAssetTechnology) IsDevelopmentRelevant() bool {
+ switch what {
+ case BuildPipeline, SourcecodeRepository, ArtifactRegistry, CodeInspectionPlatform, DevOpsClient:
+ return true
+ }
+ return false
+}
+
+// IsTrafficForwarding reports whether this technology relays traffic rather
+// than terminating it (load balancer, reverse proxy, WAF).
+func (what TechnicalAssetTechnology) IsTrafficForwarding() bool {
+ switch what {
+ case LoadBalancer, ReverseProxy, WAF:
+ return true
+ }
+ return false
+}
+
+// IsEmbeddedComponent reports whether this technology runs embedded inside
+// another asset instead of standalone (currently only libraries).
+func (what TechnicalAssetTechnology) IsEmbeddedComponent() bool {
+ return what == Library
+}
+
+// MarshalJSON serializes the enum as its schema string name.
+func (what TechnicalAssetTechnology) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum using the
+// case-insensitive find helper.
+func (what *TechnicalAssetTechnology) UnmarshalJSON(data []byte) error {
+ var text string
+ if unmarshalError := json.Unmarshal(data, &text); unmarshalError != nil {
+ return unmarshalError
+ }
+
+ found, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// MarshalYAML serializes the enum as its schema string name.
+func (what TechnicalAssetTechnology) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum using the
+// case-insensitive find helper.
+func (what *TechnicalAssetTechnology) UnmarshalYAML(node *yaml.Node) error {
+ found, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// find resolves a name (case-insensitively) to its enum value by scanning the
+// description table; unknown names produce an error and the zero value.
+func (what TechnicalAssetTechnology) find(value string) (TechnicalAssetTechnology, error) {
+ for index := range TechnicalAssetTechnologyTypeDescription {
+ if strings.EqualFold(value, TechnicalAssetTechnologyTypeDescription[index].Name) {
+ return TechnicalAssetTechnology(index), nil
+ }
+ }
+
+ return TechnicalAssetTechnology(0), fmt.Errorf("unknown technical asset technology value %q", value)
+}
diff --git a/pkg/security/types/technical_asset_technology_test.go b/pkg/security/types/technical_asset_technology_test.go
new file mode 100644
index 00000000..0f1fc086
--- /dev/null
+++ b/pkg/security/types/technical_asset_technology_test.go
@@ -0,0 +1,264 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseTechnicalAssetTechnologyTest describes one parse scenario: the raw
+// input string plus either the expected enum value or the expected error.
+type ParseTechnicalAssetTechnologyTest struct {
+ input string
+ expected TechnicalAssetTechnology
+ expectedError error
+}
+
+// TestParseTechnicalAssetTechnology checks the round-trip of every known
+// technology name through ParseTechnicalAssetTechnology plus the error for an
+// unknown name. The subtest name doubles as the parser input, which removes
+// the previous duplication of each string as both map key and input field.
+func TestParseTechnicalAssetTechnology(t *testing.T) {
+ expectedByInput := map[string]TechnicalAssetTechnology{
+ "unknown-technology": UnknownTechnology,
+ "client-system": ClientSystem,
+ "browser": Browser,
+ "desktop": Desktop,
+ "mobile-app": MobileApp,
+ "devops-client": DevOpsClient,
+ "web-server": WebServer,
+ "web-application": WebApplication,
+ "application-server": ApplicationServer,
+ "database": Database,
+ "file-server": FileServer,
+ "local-file-system": LocalFileSystem,
+ "erp": ERP,
+ "cms": CMS,
+ "web-service-rest": WebServiceREST,
+ "web-service-soap": WebServiceSOAP,
+ "ejb": EJB,
+ "search-index": SearchIndex,
+ "search-engine": SearchEngine,
+ "service-registry": ServiceRegistry,
+ "reverse-proxy": ReverseProxy,
+ "load-balancer": LoadBalancer,
+ "build-pipeline": BuildPipeline,
+ "sourcecode-repository": SourcecodeRepository,
+ "artifact-registry": ArtifactRegistry,
+ "code-inspection-platform": CodeInspectionPlatform,
+ "monitoring": Monitoring,
+ "ldap-server": LDAPServer,
+ "container-platform": ContainerPlatform,
+ "batch-processing": BatchProcessing,
+ "event-listener": EventListener,
+ "identity-provider": IdentityProvider,
+ "identity-store-ldap": IdentityStoreLDAP,
+ "identity-store-database": IdentityStoreDatabase,
+ "tool": Tool,
+ "threagile": CLI,
+ "task": Task,
+ "function": Function,
+ "gateway": Gateway,
+ "iot-device": IoTDevice,
+ "message-queue": MessageQueue,
+ "stream-processing": StreamProcessing,
+ "service-mesh": ServiceMesh,
+ "data-lake": DataLake,
+ "big-data-platform": BigDataPlatform,
+ "report-engine": ReportEngine,
+ "ai": AI,
+ "mail-server": MailServer,
+ "vault": Vault,
+ "hsm": HSM,
+ "waf": WAF,
+ "ids": IDS,
+ "ips": IPS,
+ "scheduler": Scheduler,
+ "mainframe": Mainframe,
+ "block-storage": BlockStorage,
+ "library": Library,
+ }
+
+ testCases := make(map[string]ParseTechnicalAssetTechnologyTest, len(expectedByInput)+1)
+ for input, expected := range expectedByInput {
+ testCases[input] = ParseTechnicalAssetTechnologyTest{input: input, expected: expected}
+ }
+ testCases["unknown"] = ParseTechnicalAssetTechnologyTest{
+ input: "unknown",
+ expectedError: errors.New("Unable to parse into type: unknown"),
+ }
+
+ for name, testCase := range testCases {
+ testCase := testCase // shadow loop variable for the closure (pre-Go-1.22 safety)
+ t.Run(name, func(t *testing.T) {
+ actual, err := ParseTechnicalAssetTechnology(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/technical_asset_type.go b/pkg/security/types/technical_asset_type.go
new file mode 100644
index 00000000..02f585c2
--- /dev/null
+++ b/pkg/security/types/technical_asset_type.go
@@ -0,0 +1,98 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// TechnicalAssetType classifies a technical asset in the data-flow sense
+// (external entity, process, or datastore).
+type TechnicalAssetType int
+
+const (
+ ExternalEntity TechnicalAssetType = iota // hosted/managed by a third party
+ Process
+ Datastore
+)
+
+// TechnicalAssetTypeValues returns all enum values, e.g. for listing them in
+// help output or iterating during parsing.
+func TechnicalAssetTypeValues() []TypeEnum {
+ return []TypeEnum{
+ ExternalEntity,
+ Process,
+ Datastore,
+ }
+}
+
+// TechnicalAssetTypeDescription maps each enum value (by index) to its schema
+// name and human-readable description.
+var TechnicalAssetTypeDescription = [...]TypeDescription{
+ {"external-entity", "This asset is hosted and managed by a third party"},
+ {"process", "A software process"},
+ {"datastore", "This asset stores data"},
+}
+
+// String returns the schema name of this asset type (e.g. "datastore").
+func (what TechnicalAssetType) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return TechnicalAssetTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of this asset type.
+func (what TechnicalAssetType) Explain() string {
+ return TechnicalAssetTypeDescription[what].Description
+}
+
+// ParseTechnicalAssetType converts a schema string (e.g. "process") into a
+// TechnicalAssetType. Surrounding whitespace is ignored and matching is
+// case-insensitive, consistent with the find() helper used by the JSON/YAML
+// unmarshalers. Unknown values yield the zero value and an error.
+func ParseTechnicalAssetType(value string) (technicalAssetType TechnicalAssetType, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range TechnicalAssetTypeValues() {
+ if strings.EqualFold(candidate.String(), value) {
+ return candidate.(TechnicalAssetType), nil
+ }
+ }
+ return technicalAssetType, errors.New("Unable to parse into type: " + value)
+}
+
+// MarshalJSON serializes the enum as its schema string name.
+func (what TechnicalAssetType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum using the
+// case-insensitive find helper.
+func (what *TechnicalAssetType) UnmarshalJSON(data []byte) error {
+ var text string
+ if unmarshalError := json.Unmarshal(data, &text); unmarshalError != nil {
+ return unmarshalError
+ }
+
+ found, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// MarshalYAML serializes the enum as its schema string name.
+func (what TechnicalAssetType) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum using the
+// case-insensitive find helper.
+func (what *TechnicalAssetType) UnmarshalYAML(node *yaml.Node) error {
+ found, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// find resolves a name (case-insensitively) to its enum value by scanning the
+// description table; unknown names produce an error and the zero value.
+func (what TechnicalAssetType) find(value string) (TechnicalAssetType, error) {
+ for index := range TechnicalAssetTypeDescription {
+ if strings.EqualFold(value, TechnicalAssetTypeDescription[index].Name) {
+ return TechnicalAssetType(index), nil
+ }
+ }
+
+ return TechnicalAssetType(0), fmt.Errorf("unknown technical asset type value %q", value)
+}
diff --git a/pkg/security/types/technical_asset_type_test.go b/pkg/security/types/technical_asset_type_test.go
new file mode 100644
index 00000000..a61cecb2
--- /dev/null
+++ b/pkg/security/types/technical_asset_type_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseTechnicalAssetTypeTest describes one parse scenario: the raw input
+// string plus either the expected enum value or the expected error.
+type ParseTechnicalAssetTypeTest struct {
+ input string
+ expected TechnicalAssetType
+ expectedError error
+}
+
+// TestParseTechnicalAssetType checks every known asset type name plus the
+// error returned for an unknown name. Subtest names equal the parser inputs.
+func TestParseTechnicalAssetType(t *testing.T) {
+ testCases := []ParseTechnicalAssetTypeTest{
+ {input: "external-entity", expected: ExternalEntity},
+ {input: "process", expected: Process},
+ {input: "datastore", expected: Datastore},
+ {input: "unknown", expectedError: errors.New("Unable to parse into type: unknown")},
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase // shadow loop variable for the closure
+ t.Run(testCase.input, func(t *testing.T) {
+ actual, err := ParseTechnicalAssetType(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/trust_boundary.go b/pkg/security/types/trust_boundary.go
new file mode 100644
index 00000000..15c1d8c7
--- /dev/null
+++ b/pkg/security/types/trust_boundary.go
@@ -0,0 +1,127 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "sort"
+)
+
+// TrustBoundary is a parsed model element that groups technical assets (and
+// possibly nested boundaries) inside one protective perimeter.
+type TrustBoundary struct {
+ Id string `json:"id,omitempty" yaml:"id,omitempty"`
+ Title string `json:"title,omitempty" yaml:"title,omitempty"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ Type TrustBoundaryType `json:"type,omitempty" yaml:"type,omitempty"`
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ TechnicalAssetsInside []string `json:"technical_assets_inside,omitempty" yaml:"technical_assets_inside,omitempty"` // ids of directly contained assets
+ TrustBoundariesNested []string `json:"trust_boundaries_nested,omitempty" yaml:"trust_boundaries_nested,omitempty"` // ids of directly nested boundaries
+}
+
+// RecursivelyAllTechnicalAssetIDsInside returns the ids of every technical
+// asset inside this boundary, including assets of all nested boundaries.
+func (what TrustBoundary) RecursivelyAllTechnicalAssetIDsInside(model *ParsedModel) []string {
+ result := make([]string, 0)
+ what.addAssetIDsRecursively(model, &result)
+ return result
+}
+
+// IsTaggedWithAny reports whether this boundary carries any of the given tags
+// (delegates to the package helper; presumably case-insensitive — see helper).
+func (what TrustBoundary) IsTaggedWithAny(tags ...string) bool {
+ return containsCaseInsensitiveAny(what.Tags, tags...)
+}
+
+// IsTaggedWithBaseTag reports whether any of this boundary's tags matches the
+// given base tag; the exact matching rules live in the package-level helper.
+func (what TrustBoundary) IsTaggedWithBaseTag(baseTag string) bool {
+ return IsTaggedWithBaseTag(what.Tags, baseTag)
+}
+
+func (what TrustBoundary) IsTaggedWithAnyTraversingUp(model *ParsedModel, tags ...string) bool {
+ if what.IsTaggedWithAny(tags...) {
+ return true
+ }
+ parentID := what.ParentTrustBoundaryID(model)
+ if len(parentID) > 0 && model.TrustBoundaries[parentID].IsTaggedWithAnyTraversingUp(model, tags...) {
+ return true
+ }
+ return false
+}
+
+func (what TrustBoundary) ParentTrustBoundaryID(model *ParsedModel) string {
+ var result string
+ for _, candidate := range model.TrustBoundaries {
+ if contains(candidate.TrustBoundariesNested, what.Id) {
+ result = candidate.Id
+ return result
+ }
+ }
+ return result
+}
+
+// HighestConfidentiality returns the strictest confidentiality classification
+// among all technical assets inside this boundary (transitively, including
+// nested boundaries); Public when the boundary contains no assets.
+func (what TrustBoundary) HighestConfidentiality(model *ParsedModel) Confidentiality {
+ highest := Public
+ for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) {
+ techAsset := model.TechnicalAssets[id]
+ // evaluate the (recursive) per-asset maximum only once instead of
+ // twice (once for the comparison and once for the assignment)
+ if candidate := techAsset.HighestConfidentiality(model); candidate > highest {
+ highest = candidate
+ }
+ }
+ return highest
+}
+
+// HighestIntegrity returns the strictest integrity criticality among all
+// technical assets inside this boundary (transitively); Archive when empty.
+func (what TrustBoundary) HighestIntegrity(model *ParsedModel) Criticality {
+ highest := Archive
+ for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) {
+ techAsset := model.TechnicalAssets[id]
+ if candidate := techAsset.HighestIntegrity(model); candidate > highest {
+ highest = candidate
+ }
+ }
+ return highest
+}
+
+// HighestAvailability returns the strictest availability criticality among all
+// technical assets inside this boundary (transitively); Archive when empty.
+func (what TrustBoundary) HighestAvailability(model *ParsedModel) Criticality {
+ highest := Archive
+ for _, id := range what.RecursivelyAllTechnicalAssetIDsInside(model) {
+ techAsset := model.TechnicalAssets[id]
+ if candidate := techAsset.HighestAvailability(model); candidate > highest {
+ highest = candidate
+ }
+ }
+ return highest
+}
+
+// AllParentTrustBoundaryIDs returns this boundary's own id followed by the
+// ids of all ancestor boundaries (note: despite the name, the helper below
+// records the boundary itself first — callers appear to rely on that).
+func (what TrustBoundary) AllParentTrustBoundaryIDs(model *ParsedModel) []string {
+ result := make([]string, 0)
+ what.addTrustBoundaryIDsRecursively(model, &result)
+ return result
+}
+
+// addAssetIDsRecursively appends the ids of all technical assets directly
+// inside this boundary, then recurses into every nested boundary.
+func (what TrustBoundary) addAssetIDsRecursively(model *ParsedModel, result *[]string) {
+ *result = append(*result, what.TechnicalAssetsInside...)
+ for _, nestedBoundaryID := range what.TrustBoundariesNested {
+ model.TrustBoundaries[nestedBoundaryID].addAssetIDsRecursively(model, result)
+ }
+}
+
+// addTrustBoundaryIDsRecursively appends this boundary's own id and then
+// walks up the parent chain until a top-level boundary is reached.
+func (what TrustBoundary) addTrustBoundaryIDsRecursively(model *ParsedModel, result *[]string) {
+ *result = append(*result, what.Id)
+ parentID := what.ParentTrustBoundaryID(model)
+ if len(parentID) > 0 {
+ model.TrustBoundaries[parentID].addTrustBoundaryIDsRecursively(model, result)
+ }
+}
+
+// SortedKeysOfTrustBoundaries returns all trust boundary ids in sorted order:
+// as ranging over a Go map is randomized, reports need a reproducible order.
+func SortedKeysOfTrustBoundaries(model *ParsedModel) []string {
+ // pre-size to the known final length to avoid repeated growth copies
+ keys := make([]string, 0, len(model.TrustBoundaries))
+ for k := range model.TrustBoundaries {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// ByTrustBoundaryTitleSort implements sort.Interface, ordering trust
+// boundaries alphabetically by their Title.
+type ByTrustBoundaryTitleSort []TrustBoundary
+
+func (what ByTrustBoundaryTitleSort) Len() int { return len(what) }
+func (what ByTrustBoundaryTitleSort) Swap(i, j int) { what[i], what[j] = what[j], what[i] }
+func (what ByTrustBoundaryTitleSort) Less(i, j int) bool {
+ return what[i].Title < what[j].Title
+}
diff --git a/pkg/security/types/trust_boundary_type.go b/pkg/security/types/trust_boundary_type.go
new file mode 100644
index 00000000..9a9a005d
--- /dev/null
+++ b/pkg/security/types/trust_boundary_type.go
@@ -0,0 +1,118 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// TrustBoundaryType classifies the kind of perimeter a trust boundary
+// represents (network segmentation variants or a purely logical grouping).
+type TrustBoundaryType int
+
+const (
+ NetworkOnPrem TrustBoundaryType = iota
+ NetworkDedicatedHoster
+ NetworkVirtualLAN
+ NetworkCloudProvider
+ NetworkCloudSecurityGroup
+ NetworkPolicyNamespaceIsolation
+ ExecutionEnvironment // logical grouping, not a protective network boundary
+)
+
+// TrustBoundaryTypeValues returns all enum values, e.g. for listing them in
+// help output or iterating during parsing.
+func TrustBoundaryTypeValues() []TypeEnum {
+ return []TypeEnum{
+ NetworkOnPrem,
+ NetworkDedicatedHoster,
+ NetworkVirtualLAN,
+ NetworkCloudProvider,
+ NetworkCloudSecurityGroup,
+ NetworkPolicyNamespaceIsolation,
+ ExecutionEnvironment,
+ }
+}
+
+// TrustBoundaryTypeDescription maps each enum value (by index) to its schema
+// name and human-readable description.
+var TrustBoundaryTypeDescription = [...]TypeDescription{
+ {"network-on-prem", "The whole network is on prem"},
+ {"network-dedicated-hoster", "The network is at a dedicated hoster"},
+ {"network-virtual-lan", "Network is a VLAN"},
+ {"network-cloud-provider", "Network is at a cloud provider"},
+ {"network-cloud-security-group", "Cloud rules controlling network traffic"},
+ {"network-policy-namespace-isolation", "Segregation in a Kubernetes cluster"},
+ {"execution-environment", "Logical group of items (not a protective network boundary in that sense). More like a namespace or another logical group of items"},
+}
+
+// ParseTrustBoundary converts a schema string (e.g. "network-on-prem") into a
+// TrustBoundaryType. Surrounding whitespace is ignored and matching is
+// case-insensitive, consistent with the find() helper used by the JSON/YAML
+// unmarshalers. Unknown values yield the zero value and an error.
+func ParseTrustBoundary(value string) (trustBoundary TrustBoundaryType, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range TrustBoundaryTypeValues() {
+ if strings.EqualFold(candidate.String(), value) {
+ return candidate.(TrustBoundaryType), nil
+ }
+ }
+ return trustBoundary, errors.New("Unable to parse into type: " + value)
+}
+
+// String returns the schema name of this boundary type (e.g. "network-on-prem").
+func (what TrustBoundaryType) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return TrustBoundaryTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of this boundary type.
+func (what TrustBoundaryType) Explain() string {
+ return TrustBoundaryTypeDescription[what].Description
+}
+
+// IsNetworkBoundary reports whether this boundary type represents actual
+// network segmentation (as opposed to a purely logical grouping).
+func (what TrustBoundaryType) IsNetworkBoundary() bool {
+ switch what {
+ case NetworkOnPrem, NetworkDedicatedHoster, NetworkVirtualLAN,
+ NetworkCloudProvider, NetworkCloudSecurityGroup, NetworkPolicyNamespaceIsolation:
+ return true
+ }
+ return false
+}
+
+// IsWithinCloud reports whether this boundary type lives at a cloud provider.
+func (what TrustBoundaryType) IsWithinCloud() bool {
+ switch what {
+ case NetworkCloudProvider, NetworkCloudSecurityGroup:
+ return true
+ }
+ return false
+}
+
+// MarshalJSON serializes the enum as its schema string name.
+func (what TrustBoundaryType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum using the
+// case-insensitive find helper.
+func (what *TrustBoundaryType) UnmarshalJSON(data []byte) error {
+ var text string
+ if unmarshalError := json.Unmarshal(data, &text); unmarshalError != nil {
+ return unmarshalError
+ }
+
+ found, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// MarshalYAML serializes the enum as its schema string name.
+func (what TrustBoundaryType) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum using the
+// case-insensitive find helper.
+func (what *TrustBoundaryType) UnmarshalYAML(node *yaml.Node) error {
+ found, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// find resolves a name (case-insensitively) to its enum value by scanning the
+// description table; unknown names produce an error and the zero value.
+func (what TrustBoundaryType) find(value string) (TrustBoundaryType, error) {
+ for index := range TrustBoundaryTypeDescription {
+ if strings.EqualFold(value, TrustBoundaryTypeDescription[index].Name) {
+ return TrustBoundaryType(index), nil
+ }
+ }
+
+ return TrustBoundaryType(0), fmt.Errorf("unknown trust boundary type value %q", value)
+}
diff --git a/pkg/security/types/trust_boundary_type_test.go b/pkg/security/types/trust_boundary_type_test.go
new file mode 100644
index 00000000..d93d00a1
--- /dev/null
+++ b/pkg/security/types/trust_boundary_type_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseTrustBoundaryTest describes one parse scenario: the raw input string
+// plus either the expected enum value or the expected error.
+type ParseTrustBoundaryTest struct {
+ input string
+ expected TrustBoundaryType
+ expectedError error
+}
+
+// TestParseTrustBoundaryType checks every known trust boundary type name plus
+// the error returned for an unknown name. Subtest names equal the inputs.
+func TestParseTrustBoundaryType(t *testing.T) {
+ testCases := []ParseTrustBoundaryTest{
+ {input: "network-on-prem", expected: NetworkOnPrem},
+ {input: "network-dedicated-hoster", expected: NetworkDedicatedHoster},
+ {input: "network-virtual-lan", expected: NetworkVirtualLAN},
+ {input: "network-cloud-provider", expected: NetworkCloudProvider},
+ {input: "network-cloud-security-group", expected: NetworkCloudSecurityGroup},
+ {input: "network-policy-namespace-isolation", expected: NetworkPolicyNamespaceIsolation},
+ {input: "execution-environment", expected: ExecutionEnvironment},
+ {input: "unknown", expectedError: errors.New("Unable to parse into type: unknown")},
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase // shadow loop variable for the closure
+ t.Run(testCase.input, func(t *testing.T) {
+ actual, err := ParseTrustBoundary(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/security/types/types.go b/pkg/security/types/types.go
new file mode 100644
index 00000000..78b669a8
--- /dev/null
+++ b/pkg/security/types/types.go
@@ -0,0 +1,42 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+
+package types
+
+// TypeDescription contains a name for a type and its description
+type TypeDescription struct {
+ Name string
+ Description string
+}
+
+// TypeEnum is the common interface implemented by all model enum types,
+// exposing the schema name (String) and a human-readable description (Explain).
+type TypeEnum interface {
+ String() string
+ Explain() string
+}
+
+// GetBuiltinTypeValues returns every built-in enum family keyed by a
+// human-readable caption, e.g. for "list types" style CLI output.
+func GetBuiltinTypeValues() map[string][]TypeEnum {
+ return map[string][]TypeEnum{
+ "Authentication": AuthenticationValues(),
+ "Authorization": AuthorizationValues(),
+ "Confidentiality": ConfidentialityValues(),
+ "Criticality (for integrity and availability)": CriticalityValues(),
+ "Data Breach Probability": DataBreachProbabilityValues(),
+ "Data Format": DataFormatValues(),
+ "Encryption": EncryptionStyleValues(),
+ "Protocol": ProtocolValues(),
+ "Quantity": QuantityValues(),
+ "Risk Exploitation Impact": RiskExploitationImpactValues(),
+ "Risk Exploitation Likelihood": RiskExploitationLikelihoodValues(),
+ "Risk Function": RiskFunctionValues(),
+ "Risk Severity": RiskSeverityValues(),
+ "Risk Status": RiskStatusValues(),
+ "STRIDE": STRIDEValues(),
+ "Technical Asset Machine": TechnicalAssetMachineValues(),
+ "Technical Asset Size": TechnicalAssetSizeValues(),
+ "Technical Asset Technology": TechnicalAssetTechnologyValues(),
+ "Technical Asset Type": TechnicalAssetTypeValues(),
+ "Trust Boundary Type": TrustBoundaryTypeValues(),
+ "Usage": UsageValues(),
+ }
+}
diff --git a/pkg/security/types/usage.go b/pkg/security/types/usage.go
new file mode 100644
index 00000000..d0eee3d3
--- /dev/null
+++ b/pkg/security/types/usage.go
@@ -0,0 +1,99 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v3"
+ "strings"
+)
+
+// Usage distinguishes whether an element serves business functionality or
+// development/operations (DevOps) purposes.
+type Usage int
+
+const (
+ Business Usage = iota
+ DevOps
+)
+
+// UsageValues returns all enum values, e.g. for listing them in help output
+// or iterating during parsing.
+func UsageValues() []TypeEnum {
+ return []TypeEnum{
+ Business,
+ DevOps,
+ }
+}
+
+// ParseUsage converts a schema string ("business"/"devops") into a Usage
+// value. Surrounding whitespace is ignored and matching is case-insensitive,
+// consistent with the find() helper used by the JSON/YAML unmarshalers.
+// Unknown values yield the zero value and an error.
+func ParseUsage(value string) (usage Usage, err error) {
+ value = strings.TrimSpace(value)
+ for _, candidate := range UsageValues() {
+ if strings.EqualFold(candidate.String(), value) {
+ return candidate.(Usage), nil
+ }
+ }
+ return usage, errors.New("Unable to parse into type: " + value)
+}
+
+// UsageTypeDescription maps each enum value (by index) to its schema name and
+// human-readable description.
+var UsageTypeDescription = [...]TypeDescription{
+ {"business", "This system is operational and does business tasks"},
+ {"devops", "This system is for development and/or deployment or other operational tasks"},
+}
+
+// String returns the schema name of this usage ("business" or "devops").
+func (what Usage) String() string {
+ // NOTE: maintain list also in schema.json for validation in IDEs
+ return UsageTypeDescription[what].Name
+}
+
+// Explain returns the human-readable description of this usage.
+func (what Usage) Explain() string {
+ return UsageTypeDescription[what].Description
+}
+
+// Title returns the capitalized display name ("Business" / "DevOps").
+func (what Usage) Title() string {
+ return [...]string{"Business", "DevOps"}[what]
+}
+
+// MarshalJSON serializes the enum as its schema string name.
+func (what Usage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(what.String())
+}
+
+// UnmarshalJSON parses a JSON string back into the enum using the
+// case-insensitive find helper.
+func (what *Usage) UnmarshalJSON(data []byte) error {
+ var text string
+ if unmarshalError := json.Unmarshal(data, &text); unmarshalError != nil {
+ return unmarshalError
+ }
+
+ found, findError := what.find(text)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// MarshalYAML serializes the enum as its schema string name.
+func (what Usage) MarshalYAML() (interface{}, error) {
+ return what.String(), nil
+}
+
+// UnmarshalYAML parses a YAML scalar back into the enum using the
+// case-insensitive find helper.
+func (what *Usage) UnmarshalYAML(node *yaml.Node) error {
+ found, findError := what.find(node.Value)
+ if findError != nil {
+ return findError
+ }
+
+ *what = found
+ return nil
+}
+
+// find resolves a name (case-insensitively) to its enum value by scanning the
+// description table; unknown names produce an error and the zero value.
+func (what Usage) find(value string) (Usage, error) {
+ for index := range UsageTypeDescription {
+ if strings.EqualFold(value, UsageTypeDescription[index].Name) {
+ return Usage(index), nil
+ }
+ }
+
+ return Usage(0), fmt.Errorf("unknown usage type value %q", value)
+}
diff --git a/pkg/security/types/usage_test.go b/pkg/security/types/usage_test.go
new file mode 100644
index 00000000..fafb08f1
--- /dev/null
+++ b/pkg/security/types/usage_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package types
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ParseUsageTest describes one parse scenario: the raw input string plus
+// either the expected enum value or the expected error.
+type ParseUsageTest struct {
+ input string
+ expected Usage
+ expectedError error
+}
+
+// TestParseUsage checks both known usage names plus the error returned for an
+// unknown name. Subtest names equal the parser inputs.
+func TestParseUsage(t *testing.T) {
+ testCases := []ParseUsageTest{
+ {input: "business", expected: Business},
+ {input: "devops", expected: DevOps},
+ {input: "unknown", expectedError: errors.New("Unable to parse into type: unknown")},
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase // shadow loop variable for the closure
+ t.Run(testCase.input, func(t *testing.T) {
+ actual, err := ParseUsage(testCase.input)
+
+ assert.Equal(t, testCase.expected, actual)
+ assert.Equal(t, testCase.expectedError, err)
+ })
+ }
+}
diff --git a/pkg/server/execute.go b/pkg/server/execute.go
new file mode 100644
index 00000000..6fc34fd7
--- /dev/null
+++ b/pkg/server/execute.go
@@ -0,0 +1,229 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+package server
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/gin-gonic/gin"
+)
+
+// analyze runs the full analysis for an uploaded model and streams the
+// generated artifacts back as a ZIP attachment (see execute).
+func (s *server) analyze(ginContext *gin.Context) {
+ s.execute(ginContext, false)
+}
+
+// check only validates an uploaded model (dry run) and reports success as
+// JSON; error responses are written by execute itself.
+func (s *server) check(ginContext *gin.Context) {
+ _, ok := s.execute(ginContext, true)
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model is ok",
+ })
+ }
+}
+
+func (s *server) execute(ginContext *gin.Context, dryRun bool) (yamlContent []byte, ok bool) {
+ defer func() {
+ var err error
+ if r := recover(); r != nil {
+ s.errorCount++
+ err = r.(error)
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": strings.TrimSpace(err.Error()),
+ })
+ ok = false
+ }
+ }()
+
+ dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+
+ fileUploaded, header, err := ginContext.Request.FormFile("file")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+
+ if header.Size > 50000000 {
+ msg := "maximum model upload file size exceeded (denial-of-service protection)"
+ log.Println(msg)
+ ginContext.JSON(http.StatusRequestEntityTooLarge, gin.H{
+ "error": msg,
+ })
+ return yamlContent, false
+ }
+
+ filenameUploaded := strings.TrimSpace(header.Filename)
+
+ tmpInputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-input-")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ defer func() { _ = os.RemoveAll(tmpInputDir) }()
+
+ tmpModelFile, err := os.CreateTemp(tmpInputDir, "threagile-model-*")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ defer func() { _ = os.Remove(tmpModelFile.Name()) }()
+ _, err = io.Copy(tmpModelFile, fileUploaded)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+
+ yamlFile := tmpModelFile.Name()
+
+ if strings.ToLower(filepath.Ext(filenameUploaded)) == ".zip" {
+ // unzip first (including the resources like images etc.)
+ if s.config.Verbose {
+ fmt.Println("Decompressing uploaded archive")
+ }
+ filenamesUnzipped, err := unzip(tmpModelFile.Name(), tmpInputDir)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ found := false
+ for _, name := range filenamesUnzipped {
+ if strings.ToLower(filepath.Ext(name)) == ".yaml" {
+ yamlFile = name
+ found = true
+ break
+ }
+ }
+ if !found {
+ panic(errors.New("no yaml file found in uploaded archive"))
+ }
+ }
+
+ tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-output-")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ defer func() { _ = os.RemoveAll(tmpOutputDir) }()
+
+ tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-result-*.zip")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ defer func() { _ = os.Remove(tmpResultFile.Name()) }()
+
+ if dryRun {
+ s.doItViaRuntimeCall(yamlFile, tmpOutputDir, false, false, false, false, false, true, true, true, 40)
+ } else {
+ s.doItViaRuntimeCall(yamlFile, tmpOutputDir, true, true, true, true, true, true, true, true, dpi)
+ }
+
+ yamlContent, err = os.ReadFile(filepath.Clean(yamlFile))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ err = os.WriteFile(filepath.Join(tmpOutputDir, s.config.InputFile), yamlContent, 0400)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+
+ if !dryRun {
+ files := []string{
+ filepath.Join(tmpOutputDir, s.config.InputFile),
+ filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG),
+ filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG),
+ filepath.Join(tmpOutputDir, s.config.ReportFilename),
+ filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename),
+ filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonRisksFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonStatsFilename),
+ }
+ if s.config.KeepDiagramSourceFiles {
+ files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG))
+ files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenameDOT))
+ }
+ err = zipFiles(tmpResultFile.Name(), files)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return yamlContent, false
+ }
+ if s.config.Verbose {
+ log.Println("Streaming back result file: " + tmpResultFile.Name())
+ }
+ ginContext.FileAttachment(tmpResultFile.Name(), "threagile-result.zip")
+ }
+ s.successCount++
+ return yamlContent, true
+}
+
// ultimately to avoid any in-process memory and/or data leaks by the used third party libs like PDF generation: exec and quit
//
// doItViaRuntimeCall runs the actual analysis in a fresh child process: it
// re-executes the current binary (os.Executable) with the model file, output
// directory and one boolean flag per requested artifact, then waits for the
// child to finish. It panics on failure (executable lookup error or non-zero
// child exit), so callers are expected to recover further up the handler chain.
func (s *server) doItViaRuntimeCall(modelFile string, outputDir string,
    generateDataFlowDiagram, generateDataAssetDiagram, generateReportPdf, generateRisksExcel, generateTagsExcel, generateRisksJSON, generateTechnicalAssetsJSON, generateStatsJSON bool,
    dpi int) {
    // Remember to also add the same args to the exec based sub-process calls!
    var cmd *exec.Cmd
    // Base arguments shared by every invocation; artifact-specific flags are appended below.
    args := []string{"-model", modelFile, "-output", outputDir, "-execute-model-macro", s.config.ExecuteModelMacro, "-raa-run", s.config.RAAPlugin, "-custom-risk-rules-plugins", strings.Join(s.config.RiskRulesPlugins, ","), "-skip-risk-rules", s.config.SkipRiskRules, "-diagram-dpi", strconv.Itoa(dpi)}
    if s.config.Verbose {
        args = append(args, "-verbose")
    }
    if s.config.IgnoreOrphanedRiskTracking { // TODO why add all them as arguments, when they are also variables on outer level?
        args = append(args, "-ignore-orphaned-risk-tracking")
    }
    if generateDataFlowDiagram {
        args = append(args, "-generate-data-flow-diagram")
    }
    if generateDataAssetDiagram {
        args = append(args, "-generate-data-asset-diagram")
    }
    if generateReportPdf {
        args = append(args, "-generate-report-pdf")
    }
    if generateRisksExcel {
        args = append(args, "-generate-risks-excel")
    }
    if generateTagsExcel {
        args = append(args, "-generate-tags-excel")
    }
    if generateRisksJSON {
        args = append(args, "-generate-risks-json")
    }
    if generateTechnicalAssetsJSON {
        args = append(args, "-generate-technical-assets-json")
    }
    if generateStatsJSON {
        args = append(args, "-generate-stats-json")
    }
    // Resolve the path of the currently running binary to re-exec it.
    self, nameError := os.Executable()
    if nameError != nil {
        panic(nameError)
    }

    cmd = exec.Command(self, args...) // #nosec G204
    out, err := cmd.CombinedOutput()
    if err != nil {
        // The child's combined stdout/stderr becomes the panic message.
        panic(errors.New(string(out)))
    } else {
        if s.config.Verbose && len(out) > 0 {
            fmt.Println("---")
            fmt.Print(string(out))
            fmt.Println("---")
        }
    }
}
diff --git a/pkg/server/hash.go b/pkg/server/hash.go
new file mode 100644
index 00000000..89072380
--- /dev/null
+++ b/pkg/server/hash.go
@@ -0,0 +1,35 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package server
+
+import (
+ "crypto/sha512"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash/fnv"
+)
+
// xor combines key and pad byte-wise via XOR and returns the result as a new
// slice; neither input slice is modified.
//
// Both slices must have the same length; otherwise the function panics, since
// silently truncating would weaken the masking this helper is used for.
//
// Fix: the second parameter was previously named "xor", shadowing the function
// itself inside its own body; renamed to pad (parameter names are not part of
// the caller-visible interface in Go).
func xor(key []byte, pad []byte) []byte {
    if len(key) != len(pad) {
        panic(errors.New("key length not matching XOR length"))
    }
    result := make([]byte, len(pad))
    for i, b := range key {
        result[i] = b ^ pad[i]
    }
    return result
}
+
// hashSHA256 hex-encodes a cryptographic digest of key.
//
// NOTE(review): despite its name, this computes SHA-512 (sha512.New), yielding
// a 128-character hex string. Renaming the function or switching the algorithm
// would break callers and any previously stored hashes, so the mismatch is
// only documented here — confirm intent before changing.
func hashSHA256(key []byte) string {
    hasher := sha512.New()
    hasher.Write(key)
    return hex.EncodeToString(hasher.Sum(nil))
}
+
// hash returns the 32-bit FNV-1a checksum of s, rendered as a decimal string.
func hash(s string) string {
    hasher := fnv.New32a()
    _, _ = hasher.Write([]byte(s)) // Write on an fnv hash never returns an error
    return fmt.Sprintf("%d", hasher.Sum32())
}
diff --git a/pkg/server/model.go b/pkg/server/model.go
new file mode 100644
index 00000000..af8a2a23
--- /dev/null
+++ b/pkg/server/model.go
@@ -0,0 +1,1374 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package server
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "gopkg.in/yaml.v3"
+
+ "github.com/gin-gonic/gin"
+ "github.com/google/uuid"
+ "github.com/threagile/threagile/pkg/docs"
+ "github.com/threagile/threagile/pkg/input"
+ "github.com/threagile/threagile/pkg/security/types"
+ "golang.org/x/crypto/argon2"
+)
+
// creates a sub-folder (named by a new UUID) inside the token folder
//
// createNewModel handles creation of a brand-new, empty threat model: it
// verifies the caller's token, applies the object-creation throttler, creates
// a fresh UUID-named model folder and writes a skeleton YAML into it via
// writeModelYAML. Responds 201 with the new model id on success.
func (s *server) createNewModel(ginContext *gin.Context) {
    folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
    if !ok {
        return
    }
    // rate-limit model creation per client to avoid folder spam
    ok = s.checkObjectCreationThrottler(ginContext, "MODEL")
    if !ok {
        return
    }
    s.lockFolder(folderNameOfKey)
    defer s.unlockFolder(folderNameOfKey)

    aUuid := uuid.New().String()
    err := os.Mkdir(folderNameForModel(folderNameOfKey, aUuid), 0700)
    if err != nil {
        ginContext.JSON(http.StatusInternalServerError, gin.H{
            "error": "unable to create model",
        })
        return
    }

    // skeleton model content; sections are intentionally empty so the client
    // can fill them in via the other endpoints
    aYaml := `title: New Threat Model
threagile_version: ` + docs.ThreagileVersion + `
author:
  name: ""
  homepage: ""
date:
business_overview:
  description: ""
  images: []
technical_overview:
  description: ""
  images: []
business_criticality: ""
management_summary_comment: ""
questions: {}
abuse_cases: {}
security_requirements: {}
tags_available: []
data_assets: {}
technical_assets: {}
trust_boundaries: {}
shared_runtimes: {}
individual_risk_categories: {}
risk_tracking: {}
diagram_tweak_nodesep: ""
diagram_tweak_ranksep: ""
diagram_tweak_edge_layout: ""
diagram_tweak_suppress_edge_labels: false
diagram_tweak_invisible_connections_between_assets: []
diagram_tweak_same_rank_assets: []`

    ok = s.writeModelYAML(ginContext, aYaml, key, folderNameForModel(folderNameOfKey, aUuid), "New Model Creation", true)
    if ok {
        ginContext.JSON(http.StatusCreated, gin.H{
            "message": "model created",
            "id": aUuid,
        })
    }
}
+
// payloadModels is one entry in the model listing returned by listModels:
// the model's folder-name id, its title, and created/modified timestamps.
type payloadModels struct {
    ID string `yaml:"id" json:"id"`
    Title string `yaml:"title" json:"title"`
    TimestampCreated time.Time `yaml:"timestamp_created" json:"timestamp_created"`
    TimestampModified time.Time `yaml:"timestamp_modified" json:"timestamp_modified"`
}
+
// listModels returns one payloadModels entry per model folder under the
// caller's token folder: the id (directory name), the decrypted model's title,
// and created/modified timestamps taken from the directory and input file.
func (s *server) listModels(ginContext *gin.Context) { // TODO currently returns error when any model is no longer valid in syntax, so eventually have some fallback to not just bark on an invalid model...
    folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
    if !ok {
        return
    }
    s.lockFolder(folderNameOfKey)
    defer s.unlockFolder(folderNameOfKey)

    result := make([]payloadModels, 0)
    modelFolders, err := os.ReadDir(folderNameOfKey)
    if err != nil {
        log.Println(err)
        ginContext.JSON(http.StatusNotFound, gin.H{
            "error": "token not found",
        })
        return
    }
    for _, dirEntry := range modelFolders {
        if dirEntry.IsDir() {
            // input-file mtime serves as the "last modified" timestamp
            modelStat, err := os.Stat(filepath.Join(folderNameOfKey, dirEntry.Name(), s.config.InputFile))
            if err != nil {
                log.Println(err)
                ginContext.JSON(http.StatusNotFound, gin.H{
                    "error": "unable to list model",
                })
                return
            }
            // decrypt/parse the model just to obtain its title; aborts the
            // whole listing when a single model is unreadable (see TODO above)
            aModel, _, ok := s.readModel(ginContext, dirEntry.Name(), key, folderNameOfKey)
            if !ok {
                return
            }
            fileInfo, err := dirEntry.Info()
            if err != nil {
                log.Println(err)
                ginContext.JSON(http.StatusNotFound, gin.H{
                    "error": "unable to get file info",
                })
                return
            }
            result = append(result, payloadModels{
                ID: dirEntry.Name(),
                Title: aModel.Title,
                TimestampCreated: fileInfo.ModTime(),
                TimestampModified: modelStat.ModTime(),
            })
        }
    }
    ginContext.JSON(http.StatusOK, result)
}
+
+func (s *server) deleteModel(ginContext *gin.Context) {
+ folderNameOfKey, _, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ folder, ok := s.checkModelFolder(ginContext, ginContext.Param("model-id"), folderNameOfKey)
+ if ok {
+ if folder != filepath.Clean(folder) {
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "model-id is weird",
+ })
+ return
+ }
+ err := os.RemoveAll(folder)
+ if err != nil {
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "model not found",
+ })
+ return
+ }
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model deleted",
+ })
+ }
+}
+
// payloadCover is the request/response body for the model cover endpoints:
// title, date and author of the threat model.
type payloadCover struct {
    Title string `yaml:"title" json:"title"`
    Date time.Time `yaml:"date" json:"date"`
    Author input.Author `yaml:"author" json:"author"`
}
+
+func (s *server) setCover(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ payload := payloadCover{}
+ err := ginContext.BindJSON(&payload)
+ if err != nil {
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": "unable to parse request payload",
+ })
+ return
+ }
+ modelInput.Title = payload.Title
+ if !payload.Date.IsZero() {
+ modelInput.Date = payload.Date.Format("2006-01-02")
+ }
+ modelInput.Author.Name = payload.Author.Name
+ modelInput.Author.Homepage = payload.Author.Homepage
+ ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Cover Update")
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model updated",
+ })
+ }
+ }
+}
+
+func (s *server) getCover(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "title": aModel.Title,
+ "date": aModel.Date,
+ "author": aModel.Author,
+ })
+ }
+}
+
// payloadOverview is the request body for setOverview: management summary,
// business criticality, and the business/technical overview sections.
type payloadOverview struct {
    ManagementSummaryComment string `yaml:"management_summary_comment" json:"management_summary_comment"`
    BusinessCriticality string `yaml:"business_criticality" json:"business_criticality"`
    BusinessOverview input.Overview `yaml:"business_overview" json:"business_overview"`
    TechnicalOverview input.Overview `yaml:"technical_overview" json:"technical_overview"`
}
+
+func (s *server) setOverview(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ payload := payloadOverview{}
+ err := ginContext.BindJSON(&payload)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": "unable to parse request payload",
+ })
+ return
+ }
+ criticality, err := types.ParseCriticality(payload.BusinessCriticality)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ modelInput.ManagementSummaryComment = payload.ManagementSummaryComment
+ modelInput.BusinessCriticality = criticality.String()
+ modelInput.BusinessOverview.Description = payload.BusinessOverview.Description
+ modelInput.BusinessOverview.Images = payload.BusinessOverview.Images
+ modelInput.TechnicalOverview.Description = payload.TechnicalOverview.Description
+ modelInput.TechnicalOverview.Images = payload.TechnicalOverview.Images
+ ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Overview Update")
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model updated",
+ })
+ }
+ }
+}
+
+func (s *server) getOverview(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "management_summary_comment": aModel.ManagementSummaryComment,
+ "business_criticality": aModel.BusinessCriticality,
+ "business_overview": aModel.BusinessOverview,
+ "technical_overview": aModel.TechnicalOverview,
+ })
+ }
+}
+
// payloadAbuseCases maps an abuse-case title to its description.
type payloadAbuseCases map[string]string
+
+func (s *server) setAbuseCases(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ payload := payloadAbuseCases{}
+ err := ginContext.BindJSON(&payload)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": "unable to parse request payload",
+ })
+ return
+ }
+ modelInput.AbuseCases = payload
+ ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Abuse Cases Update")
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model updated",
+ })
+ }
+ }
+}
+
+func (s *server) getAbuseCases(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, aModel.AbuseCases)
+ }
+}
+
// payloadSecurityRequirements maps a security-requirement title to its description.
type payloadSecurityRequirements map[string]string
+
+func (s *server) setSecurityRequirements(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ payload := payloadSecurityRequirements{}
+ err := ginContext.BindJSON(&payload)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": "unable to parse request payload",
+ })
+ return
+ }
+ modelInput.SecurityRequirements = payload
+ ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Security Requirements Update")
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "model updated",
+ })
+ }
+ }
+}
+
+func (s *server) getSecurityRequirements(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, aModel.SecurityRequirements)
+ }
+}
+
// payloadDataAsset is the request body for the data-asset create/update
// endpoints. The enum-like string fields (Usage, Quantity, Confidentiality,
// Integrity, Availability) are validated by populateDataAsset; Title becomes
// the YAML map key rather than a field of the stored asset.
type payloadDataAsset struct {
    Title string `yaml:"title" json:"title"`
    Id string `yaml:"id" json:"id"`
    Description string `yaml:"description" json:"description"`
    Usage string `yaml:"usage" json:"usage"`
    Tags []string `yaml:"tags" json:"tags"`
    Origin string `yaml:"origin" json:"origin"`
    Owner string `yaml:"owner" json:"owner"`
    Quantity string `yaml:"quantity" json:"quantity"`
    Confidentiality string `yaml:"confidentiality" json:"confidentiality"`
    Integrity string `yaml:"integrity" json:"integrity"`
    Availability string `yaml:"availability" json:"availability"`
    JustificationCiaRating string `yaml:"justification_cia_rating" json:"justification_cia_rating"`
}
+
+func (s *server) getDataAssets(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, aModel.DataAssets)
+ }
+}
+
+func (s *server) getDataAsset(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ // yes, here keyed by title in YAML for better readability in the YAML file itself
+ for title, dataAsset := range modelInput.DataAssets {
+ if dataAsset.ID == ginContext.Param("data-asset-id") {
+ ginContext.JSON(http.StatusOK, gin.H{
+ title: dataAsset,
+ })
+ return
+ }
+ }
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "data asset not found",
+ })
+ }
+}
+
// deleteDataAsset removes the data asset with the given id from the model and
// scrubs every reference to it: technical assets' processed/stored lists,
// communication links' sent/received lists, and individual risk instances.
// The response reports whether any references were removed alongside the asset.
//
// NOTE(review): the removal loops below mutate the very slices being ranged
// over (shift + truncate while iterating forward), which can skip the element
// following a removed index and can read a stale tail element within the same
// pass — the inline "TODO needs more testing" markers apply; left byte-identical.
func (s *server) deleteDataAsset(ginContext *gin.Context) {
    folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
    if !ok {
        return
    }
    s.lockFolder(folderNameOfKey)
    defer s.unlockFolder(folderNameOfKey)
    modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
    if ok {
        referencesDeleted := false
        // yes, here keyed by title in YAML for better readability in the YAML file itself
        for title, dataAsset := range modelInput.DataAssets {
            if dataAsset.ID == ginContext.Param("data-asset-id") {
                // also remove all usages of this data asset !!
                for _, techAsset := range modelInput.TechnicalAssets {
                    if techAsset.DataAssetsProcessed != nil {
                        for i, parsedChangeCandidateAsset := range techAsset.DataAssetsProcessed {
                            referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset)
                            if referencedAsset == dataAsset.ID { // apply the removal
                                referencesDeleted = true
                                // Remove the element at index i
                                // TODO needs more testing
                                copy(techAsset.DataAssetsProcessed[i:], techAsset.DataAssetsProcessed[i+1:]) // Shift a[i+1:] left one index.
                                techAsset.DataAssetsProcessed[len(techAsset.DataAssetsProcessed)-1] = "" // Erase last element (write zero value).
                                techAsset.DataAssetsProcessed = techAsset.DataAssetsProcessed[:len(techAsset.DataAssetsProcessed)-1] // Truncate slice.
                            }
                        }
                    }
                    if techAsset.DataAssetsStored != nil {
                        for i, parsedChangeCandidateAsset := range techAsset.DataAssetsStored {
                            referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset)
                            if referencedAsset == dataAsset.ID { // apply the removal
                                referencesDeleted = true
                                // Remove the element at index i
                                // TODO needs more testing
                                copy(techAsset.DataAssetsStored[i:], techAsset.DataAssetsStored[i+1:]) // Shift a[i+1:] left one index.
                                techAsset.DataAssetsStored[len(techAsset.DataAssetsStored)-1] = "" // Erase last element (write zero value).
                                techAsset.DataAssetsStored = techAsset.DataAssetsStored[:len(techAsset.DataAssetsStored)-1] // Truncate slice.
                            }
                        }
                    }
                    if techAsset.CommunicationLinks != nil {
                        for title, commLink := range techAsset.CommunicationLinks {
                            for i, dataAssetSent := range commLink.DataAssetsSent {
                                referencedAsset := fmt.Sprintf("%v", dataAssetSent)
                                if referencedAsset == dataAsset.ID { // apply the removal
                                    referencesDeleted = true
                                    // Remove the element at index i
                                    // TODO needs more testing
                                    copy(techAsset.CommunicationLinks[title].DataAssetsSent[i:], techAsset.CommunicationLinks[title].DataAssetsSent[i+1:]) // Shift a[i+1:] left one index.
                                    techAsset.CommunicationLinks[title].DataAssetsSent[len(techAsset.CommunicationLinks[title].DataAssetsSent)-1] = "" // Erase last element (write zero value).
                                    // map values are not addressable: copy out, truncate, write back
                                    x := techAsset.CommunicationLinks[title]
                                    x.DataAssetsSent = techAsset.CommunicationLinks[title].DataAssetsSent[:len(techAsset.CommunicationLinks[title].DataAssetsSent)-1] // Truncate slice.
                                    techAsset.CommunicationLinks[title] = x
                                }
                            }
                            for i, dataAssetReceived := range commLink.DataAssetsReceived {
                                referencedAsset := fmt.Sprintf("%v", dataAssetReceived)
                                if referencedAsset == dataAsset.ID { // apply the removal
                                    referencesDeleted = true
                                    // Remove the element at index i
                                    // TODO needs more testing
                                    copy(techAsset.CommunicationLinks[title].DataAssetsReceived[i:], techAsset.CommunicationLinks[title].DataAssetsReceived[i+1:]) // Shift a[i+1:] left one index.
                                    techAsset.CommunicationLinks[title].DataAssetsReceived[len(techAsset.CommunicationLinks[title].DataAssetsReceived)-1] = "" // Erase last element (write zero value).
                                    // map values are not addressable: copy out, truncate, write back
                                    x := techAsset.CommunicationLinks[title]
                                    x.DataAssetsReceived = techAsset.CommunicationLinks[title].DataAssetsReceived[:len(techAsset.CommunicationLinks[title].DataAssetsReceived)-1] // Truncate slice.
                                    techAsset.CommunicationLinks[title] = x
                                }
                            }
                        }
                    }
                }
                // blank out references from individually defined risk instances
                for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories {
                    if individualRiskCat.RisksIdentified != nil {
                        for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified {
                            if individualRiskInstance.MostRelevantDataAsset == dataAsset.ID { // apply the removal
                                referencesDeleted = true
                                x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle]
                                x.MostRelevantDataAsset = "" // TODO needs more testing
                                modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x
                            }
                        }
                    }
                }
                // remove it itself
                delete(modelInput.DataAssets, title)
                ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Data Asset Deletion")
                if ok {
                    ginContext.JSON(http.StatusOK, gin.H{
                        "message": "data asset deleted",
                        "id": dataAsset.ID,
                        "references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well
                    })
                }
                return
            }
        }
        ginContext.JSON(http.StatusNotFound, gin.H{
            "error": "data asset not found",
        })
    }
}
+
// setDataAsset updates an existing data asset (looked up by id). The asset is
// re-keyed under the payload's title, and if the payload changes the asset's
// id, every reference in technical assets (processed/stored lists,
// communication links) and individual risk instances is rewritten to the new
// id ("ID-CHANGE-PROPAGATION"). The response flags id_changed so clients know
// to reload other model parts.
func (s *server) setDataAsset(ginContext *gin.Context) {
    folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
    if !ok {
        return
    }
    s.lockFolder(folderNameOfKey)
    defer s.unlockFolder(folderNameOfKey)
    modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
    if ok {
        // yes, here keyed by title in YAML for better readability in the YAML file itself
        for title, dataAsset := range modelInput.DataAssets {
            if dataAsset.ID == ginContext.Param("data-asset-id") {
                payload := payloadDataAsset{}
                err := ginContext.BindJSON(&payload)
                if err != nil {
                    log.Println(err)
                    ginContext.JSON(http.StatusBadRequest, gin.H{
                        "error": "unable to parse request payload",
                    })
                    return
                }
                dataAssetInput, ok := s.populateDataAsset(ginContext, payload)
                if !ok {
                    return
                }
                // in order to also update the title, remove the asset from the map and re-insert it (with new key)
                delete(modelInput.DataAssets, title)
                modelInput.DataAssets[payload.Title] = dataAssetInput
                idChanged := dataAssetInput.ID != dataAsset.ID
                if idChanged { // ID-CHANGE-PROPAGATION
                    // also update all usages to point to the new (changed) ID !!
                    for techAssetTitle, techAsset := range modelInput.TechnicalAssets {
                        if techAsset.DataAssetsProcessed != nil {
                            for i, parsedChangeCandidateAsset := range techAsset.DataAssetsProcessed {
                                referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset)
                                if referencedAsset == dataAsset.ID { // apply the ID change
                                    modelInput.TechnicalAssets[techAssetTitle].DataAssetsProcessed[i] = dataAssetInput.ID
                                }
                            }
                        }
                        if techAsset.DataAssetsStored != nil {
                            for i, parsedChangeCandidateAsset := range techAsset.DataAssetsStored {
                                referencedAsset := fmt.Sprintf("%v", parsedChangeCandidateAsset)
                                if referencedAsset == dataAsset.ID { // apply the ID change
                                    modelInput.TechnicalAssets[techAssetTitle].DataAssetsStored[i] = dataAssetInput.ID
                                }
                            }
                        }
                        if techAsset.CommunicationLinks != nil {
                            for title, commLink := range techAsset.CommunicationLinks {
                                for i, dataAssetSent := range commLink.DataAssetsSent {
                                    referencedAsset := fmt.Sprintf("%v", dataAssetSent)
                                    if referencedAsset == dataAsset.ID { // apply the ID change
                                        modelInput.TechnicalAssets[techAssetTitle].CommunicationLinks[title].DataAssetsSent[i] = dataAssetInput.ID
                                    }
                                }
                                for i, dataAssetReceived := range commLink.DataAssetsReceived {
                                    referencedAsset := fmt.Sprintf("%v", dataAssetReceived)
                                    if referencedAsset == dataAsset.ID { // apply the ID change
                                        modelInput.TechnicalAssets[techAssetTitle].CommunicationLinks[title].DataAssetsReceived[i] = dataAssetInput.ID
                                    }
                                }
                            }
                        }
                    }
                    // individually defined risk instances may also reference the old id
                    for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories {
                        if individualRiskCat.RisksIdentified != nil {
                            for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified {
                                if individualRiskInstance.MostRelevantDataAsset == dataAsset.ID { // apply the ID change
                                    x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle]
                                    x.MostRelevantDataAsset = dataAssetInput.ID // TODO needs more testing
                                    modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x
                                }
                            }
                        }
                    }
                }
                ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Data Asset Update")
                if ok {
                    ginContext.JSON(http.StatusOK, gin.H{
                        "message": "data asset updated",
                        "id": dataAssetInput.ID,
                        "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded
                    })
                }
                return
            }
        }
        ginContext.JSON(http.StatusNotFound, gin.H{
            "error": "data asset not found",
        })
    }
}
+
+func (s *server) createNewDataAsset(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ payload := payloadDataAsset{}
+ err := ginContext.BindJSON(&payload)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": "unable to parse request payload",
+ })
+ return
+ }
+ // yes, here keyed by title in YAML for better readability in the YAML file itself
+ if _, exists := modelInput.DataAssets[payload.Title]; exists {
+ ginContext.JSON(http.StatusConflict, gin.H{
+ "error": "data asset with this title already exists",
+ })
+ return
+ }
+ // but later it will in memory keyed by its "id", so do this uniqueness check also
+ for _, asset := range modelInput.DataAssets {
+ if asset.ID == payload.Id {
+ ginContext.JSON(http.StatusConflict, gin.H{
+ "error": "data asset with this id already exists",
+ })
+ return
+ }
+ }
+ dataAssetInput, ok := s.populateDataAsset(ginContext, payload)
+ if !ok {
+ return
+ }
+ if modelInput.DataAssets == nil {
+ modelInput.DataAssets = make(map[string]input.DataAsset)
+ }
+ modelInput.DataAssets[payload.Title] = dataAssetInput
+ ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Data Asset Creation")
+ if ok {
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "data asset created",
+ "id": dataAssetInput.ID,
+ })
+ }
+ }
+}
+
// populateDataAsset converts the request payload into an input.DataAsset,
// parsing/validating the enum-like string fields in a fixed order (usage,
// quantity, confidentiality, integrity, availability). On the first parse
// error it writes an error response via handleErrorInServiceCall and returns
// ok=false. Note: the payload's Title is intentionally NOT part of the result —
// callers use it as the YAML map key instead.
func (s *server) populateDataAsset(ginContext *gin.Context, payload payloadDataAsset) (dataAssetInput input.DataAsset, ok bool) {
    usage, err := types.ParseUsage(payload.Usage)
    if err != nil {
        handleErrorInServiceCall(err, ginContext)
        return dataAssetInput, false
    }
    quantity, err := types.ParseQuantity(payload.Quantity)
    if err != nil {
        handleErrorInServiceCall(err, ginContext)
        return dataAssetInput, false
    }
    confidentiality, err := types.ParseConfidentiality(payload.Confidentiality)
    if err != nil {
        handleErrorInServiceCall(err, ginContext)
        return dataAssetInput, false
    }
    integrity, err := types.ParseCriticality(payload.Integrity)
    if err != nil {
        handleErrorInServiceCall(err, ginContext)
        return dataAssetInput, false
    }
    availability, err := types.ParseCriticality(payload.Availability)
    if err != nil {
        handleErrorInServiceCall(err, ginContext)
        return dataAssetInput, false
    }
    // all enums valid — build the asset; tags are normalized to lower-case/trimmed
    dataAssetInput = input.DataAsset{
        ID: payload.Id,
        Description: payload.Description,
        Usage: usage.String(),
        Tags: lowerCaseAndTrim(payload.Tags),
        Origin: payload.Origin,
        Owner: payload.Owner,
        Quantity: quantity.String(),
        Confidentiality: confidentiality.String(),
        Integrity: integrity.String(),
        Availability: availability.String(),
        JustificationCiaRating: payload.JustificationCiaRating,
    }
    return dataAssetInput, true
}
+
+func (s *server) getTrustBoundaries(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, aModel.TrustBoundaries)
+ }
+}
+
// payloadSharedRuntime is the request body for the shared-runtime
// create/update endpoints; Title becomes the YAML map key.
type payloadSharedRuntime struct {
    Title string `yaml:"title" json:"title"`
    Id string `yaml:"id" json:"id"`
    Description string `yaml:"description" json:"description"`
    Tags []string `yaml:"tags" json:"tags"`
    TechnicalAssetsRunning []string `yaml:"technical_assets_running" json:"technical_assets_running"`
}
+
// setSharedRuntime updates an existing shared runtime (looked up by id). The
// runtime is re-keyed under the payload's title, and if the payload changes
// the runtime's id, references in individual risk instances are rewritten to
// the new id ("ID-CHANGE-PROPAGATION"). The response flags id_changed so
// clients know to reload other model parts.
func (s *server) setSharedRuntime(ginContext *gin.Context) {
    folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
    if !ok {
        return
    }
    s.lockFolder(folderNameOfKey)
    defer s.unlockFolder(folderNameOfKey)
    modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
    if ok {
        // yes, here keyed by title in YAML for better readability in the YAML file itself
        for title, sharedRuntime := range modelInput.SharedRuntimes {
            if sharedRuntime.ID == ginContext.Param("shared-runtime-id") {
                payload := payloadSharedRuntime{}
                err := ginContext.BindJSON(&payload)
                if err != nil {
                    log.Println(err)
                    ginContext.JSON(http.StatusBadRequest, gin.H{
                        "error": "unable to parse request payload",
                    })
                    return
                }
                sharedRuntimeInput, ok := populateSharedRuntime(ginContext, payload)
                if !ok {
                    return
                }
                // in order to also update the title, remove the shared runtime from the map and re-insert it (with new key)
                delete(modelInput.SharedRuntimes, title)
                modelInput.SharedRuntimes[payload.Title] = sharedRuntimeInput
                idChanged := sharedRuntimeInput.ID != sharedRuntime.ID
                if idChanged { // ID-CHANGE-PROPAGATION
                    // individually defined risk instances may reference the old id
                    for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories {
                        if individualRiskCat.RisksIdentified != nil {
                            for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified {
                                if individualRiskInstance.MostRelevantSharedRuntime == sharedRuntime.ID { // apply the ID change
                                    x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle]
                                    x.MostRelevantSharedRuntime = sharedRuntimeInput.ID // TODO needs more testing
                                    modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x
                                }
                            }
                        }
                    }
                }
                ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Update")
                if ok {
                    ginContext.JSON(http.StatusOK, gin.H{
                        "message": "shared runtime updated",
                        "id": sharedRuntimeInput.ID,
                        "id_changed": idChanged, // in order to signal to clients, that other model parts might've received updates as well and should be reloaded
                    })
                }
                return
            }
        }
        ginContext.JSON(http.StatusNotFound, gin.H{
            "error": "shared runtime not found",
        })
    }
}
+
+func (s *server) getSharedRuntime(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ // yes, here keyed by title in YAML for better readability in the YAML file itself
+ for title, sharedRuntime := range modelInput.SharedRuntimes {
+ if sharedRuntime.ID == ginContext.Param("shared-runtime-id") {
+ ginContext.JSON(http.StatusOK, gin.H{
+ title: sharedRuntime,
+ })
+ return
+ }
+ }
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "shared runtime not found",
+ })
+ }
+}
+
// createNewSharedRuntime adds a new shared runtime to the model from the JSON
// request payload. It rejects duplicates both by YAML title (the map key) and
// by runtime ID, and validates that every referenced technical asset exists.
// On success the persisted model is re-written and the new ID echoed back.
func (s *server) createNewSharedRuntime(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)
	modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if ok {
		payload := payloadSharedRuntime{}
		err := ginContext.BindJSON(&payload)
		if err != nil {
			log.Println(err)
			ginContext.JSON(http.StatusBadRequest, gin.H{
				"error": "unable to parse request payload",
			})
			return
		}
		// yes, here keyed by title in YAML for better readability in the YAML file itself
		if _, exists := modelInput.SharedRuntimes[payload.Title]; exists {
			ginContext.JSON(http.StatusConflict, gin.H{
				"error": "shared runtime with this title already exists",
			})
			return
		}
		// but later it will in memory keyed by its "id", so do this uniqueness check also
		for _, sharedRuntime := range modelInput.SharedRuntimes {
			if sharedRuntime.ID == payload.Id {
				ginContext.JSON(http.StatusConflict, gin.H{
					"error": "shared runtime with this id already exists",
				})
				return
			}
		}
		if !checkTechnicalAssetsExisting(modelInput, payload.TechnicalAssetsRunning) {
			ginContext.JSON(http.StatusBadRequest, gin.H{
				"error": "referenced technical asset does not exist",
			})
			return
		}
		sharedRuntimeInput, ok := populateSharedRuntime(ginContext, payload)
		if !ok {
			return
		}
		// lazily initialize the map — it is nil when the model had no shared runtimes yet
		if modelInput.SharedRuntimes == nil {
			modelInput.SharedRuntimes = make(map[string]input.SharedRuntime)
		}
		modelInput.SharedRuntimes[payload.Title] = sharedRuntimeInput
		ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Creation")
		if ok {
			ginContext.JSON(http.StatusOK, gin.H{
				"message": "shared runtime created",
				"id":      sharedRuntimeInput.ID,
			})
		}
	}
}
+
+func checkTechnicalAssetsExisting(modelInput input.Model, techAssetIDs []string) (ok bool) {
+ for _, techAssetID := range techAssetIDs {
+ exists := false
+ for _, val := range modelInput.TechnicalAssets {
+ if val.ID == techAssetID {
+ exists = true
+ break
+ }
+ }
+ if !exists {
+ return false
+ }
+ }
+ return true
+}
+
+func populateSharedRuntime(_ *gin.Context, payload payloadSharedRuntime) (sharedRuntimeInput input.SharedRuntime, ok bool) {
+ sharedRuntimeInput = input.SharedRuntime{
+ ID: payload.Id,
+ Description: payload.Description,
+ Tags: lowerCaseAndTrim(payload.Tags),
+ TechnicalAssetsRunning: payload.TechnicalAssetsRunning,
+ }
+ return sharedRuntimeInput, true
+}
+
// deleteSharedRuntime removes the shared runtime addressed by the
// "shared-runtime-id" URL parameter from the model. Any individually
// identified risk referencing it gets its MostRelevantSharedRuntime cleared,
// and the response flags via "references_deleted" whether that happened.
func (s *server) deleteSharedRuntime(ginContext *gin.Context) {
	folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
	if !ok {
		return
	}
	s.lockFolder(folderNameOfKey)
	defer s.unlockFolder(folderNameOfKey)
	modelInput, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
	if ok {
		referencesDeleted := false
		// yes, here keyed by title in YAML for better readability in the YAML file itself
		for title, sharedRuntime := range modelInput.SharedRuntimes {
			if sharedRuntime.ID == ginContext.Param("shared-runtime-id") {
				// also remove all usages of this shared runtime !!
				for individualRiskCatTitle, individualRiskCat := range modelInput.IndividualRiskCategories {
					if individualRiskCat.RisksIdentified != nil {
						for individualRiskInstanceTitle, individualRiskInstance := range individualRiskCat.RisksIdentified {
							if individualRiskInstance.MostRelevantSharedRuntime == sharedRuntime.ID { // apply the removal
								referencesDeleted = true
								// map values are not addressable: copy, patch, write back
								x := modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle]
								x.MostRelevantSharedRuntime = "" // TODO needs more testing
								modelInput.IndividualRiskCategories[individualRiskCatTitle].RisksIdentified[individualRiskInstanceTitle] = x
							}
						}
					}
				}
				// remove it itself (deleting while ranging is safe here — we return right away)
				delete(modelInput.SharedRuntimes, title)
				ok = s.writeModel(ginContext, key, folderNameOfKey, &modelInput, "Shared Runtime Deletion")
				if ok {
					ginContext.JSON(http.StatusOK, gin.H{
						"message":            "shared runtime deleted",
						"id":                 sharedRuntime.ID,
						"references_deleted": referencesDeleted, // in order to signal to clients, that other model parts might've been deleted as well
					})
				}
				return
			}
		}
		ginContext.JSON(http.StatusNotFound, gin.H{
			"error": "shared runtime not found",
		})
	}
}
+
+func (s *server) getSharedRuntimes(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ aModel, _, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ ginContext.JSON(http.StatusOK, aModel.SharedRuntimes)
+ }
+}
+
+func (s *server) readModel(ginContext *gin.Context, modelUUID string, key []byte, folderNameOfKey string) (modelInputResult input.Model, yamlText string, ok bool) {
+ modelFolder, ok := s.checkModelFolder(ginContext, modelUUID, folderNameOfKey)
+ if !ok {
+ return modelInputResult, yamlText, false
+ }
+ cryptoKey := generateKeyFromAlreadyStrongRandomInput(key)
+ block, err := aes.NewCipher(cryptoKey)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+ aesGcm, err := cipher.NewGCM(block)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+
+ fileBytes, err := os.ReadFile(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile)))
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+
+ nonce := fileBytes[0:12]
+ ciphertext := fileBytes[12:]
+ plaintext, err := aesGcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+
+ r, err := gzip.NewReader(bytes.NewReader(plaintext))
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+ buf := new(bytes.Buffer)
+ _, _ = buf.ReadFrom(r)
+ modelInput := new(input.Model).Defaults()
+ yamlBytes := buf.Bytes()
+ err = yaml.Unmarshal(yamlBytes, &modelInput)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to open model",
+ })
+ return modelInputResult, yamlText, false
+ }
+ return *modelInput, string(yamlBytes), true
+}
+
+func (s *server) writeModel(ginContext *gin.Context, key []byte, folderNameOfKey string, modelInput *input.Model, changeReasonForHistory string) (ok bool) {
+ modelFolder, ok := s.checkModelFolder(ginContext, ginContext.Param("model-id"), folderNameOfKey)
+ if ok {
+ modelInput.ThreagileVersion = docs.ThreagileVersion
+ yamlBytes, err := yaml.Marshal(modelInput)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ /*
+ yamlBytes = model.ReformatYAML(yamlBytes)
+ */
+ return s.writeModelYAML(ginContext, string(yamlBytes), key, modelFolder, changeReasonForHistory, false)
+ }
+ return false
+}
+
+func (s *server) checkModelFolder(ginContext *gin.Context, modelUUID string, folderNameOfKey string) (modelFolder string, ok bool) {
+ uuidParsed, err := uuid.Parse(modelUUID)
+ if err != nil {
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "model not found",
+ })
+ return modelFolder, false
+ }
+ modelFolder = folderNameForModel(folderNameOfKey, uuidParsed.String())
+ if _, err := os.Stat(modelFolder); os.IsNotExist(err) {
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "model not found",
+ })
+ return modelFolder, false
+ }
+ return modelFolder, true
+}
+
+func (s *server) getModel(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+ _, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if ok {
+ tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-*.yaml")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ err = os.WriteFile(tmpResultFile.Name(), []byte(yamlText), 0400)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to stream model file",
+ })
+ return
+ }
+ defer func() { _ = os.Remove(tmpResultFile.Name()) }()
+ ginContext.FileAttachment(tmpResultFile.Name(), s.config.InputFile)
+ }
+}
+
+// fully replaces threagile.yaml in sub-folder given by UUID
+func (s *server) importModel(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer s.unlockFolder(folderNameOfKey)
+
+ aUuid := ginContext.Param("model-id") // UUID is syntactically validated in readModel+checkModelFolder (next line) via uuid.Parse(modelUUID)
+ _, _, ok = s.readModel(ginContext, aUuid, key, folderNameOfKey)
+ if ok {
+ // first analyze it simply by executing the full risk process (just discard the result) to ensure that everything would work
+ yamlContent, ok := s.execute(ginContext, true)
+ if ok {
+ // if we're here, then no problem was raised, so ok to proceed
+ ok = s.writeModelYAML(ginContext, string(yamlContent), key, folderNameForModel(folderNameOfKey, aUuid), "Model Import", false)
+ if ok {
+ ginContext.JSON(http.StatusCreated, gin.H{
+ "message": "model imported",
+ })
+ }
+ }
+ }
+}
+
+func (s *server) analyzeModelOnServerDirectly(ginContext *gin.Context) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer func() {
+ s.unlockFolder(folderNameOfKey)
+ var err error
+ if r := recover(); r != nil {
+ err = r.(error)
+ if s.config.Verbose {
+ log.Println(err)
+ }
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": strings.TrimSpace(err.Error()),
+ })
+ ok = false
+ }
+ }()
+
+ dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+
+ _, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if !ok {
+ return
+ }
+ tmpModelFile, err := os.CreateTemp(s.config.TempFolder, "threagile-direct-analyze-*")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ defer func() { _ = os.Remove(tmpModelFile.Name()) }()
+ tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-direct-analyze-")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ defer func() { _ = os.RemoveAll(tmpOutputDir) }()
+ tmpResultFile, err := os.CreateTemp(s.config.TempFolder, "threagile-result-*.zip")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ defer func() { _ = os.Remove(tmpResultFile.Name()) }()
+
+ err = os.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400)
+
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, true, true, true, true, true, true, true, true, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ err = os.WriteFile(filepath.Join(tmpOutputDir, s.config.InputFile), []byte(yamlText), 0400)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+
+ files := []string{
+ filepath.Join(tmpOutputDir, s.config.InputFile),
+ filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG),
+ filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG),
+ filepath.Join(tmpOutputDir, s.config.ReportFilename),
+ filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename),
+ filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonRisksFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename),
+ filepath.Join(tmpOutputDir, s.config.JsonStatsFilename),
+ }
+ if s.config.KeepDiagramSourceFiles {
+ files = append(files, filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenameDOT))
+ files = append(files, filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenameDOT))
+ }
+ err = zipFiles(tmpResultFile.Name(), files)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ if s.config.Verbose {
+ fmt.Println("Streaming back result file: " + tmpResultFile.Name())
+ }
+ ginContext.FileAttachment(tmpResultFile.Name(), "threagile-result.zip")
+}
+
+func (s *server) writeModelYAML(ginContext *gin.Context, yaml string, key []byte, modelFolder string, changeReasonForHistory string, skipBackup bool) (ok bool) {
+ if s.config.Verbose {
+ fmt.Println("about to write " + strconv.Itoa(len(yaml)) + " bytes of yaml into model folder: " + modelFolder)
+ }
+ var b bytes.Buffer
+ w := gzip.NewWriter(&b)
+ _, _ = w.Write([]byte(yaml))
+ _ = w.Close()
+ plaintext := b.Bytes()
+ cryptoKey := generateKeyFromAlreadyStrongRandomInput(key)
+ block, err := aes.NewCipher(cryptoKey)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ // Never use more than 2^32 random nonces with a given key because of the risk of a repeat.
+ nonce := make([]byte, 12)
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ aesGcm, err := cipher.NewGCM(block)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ ciphertext := aesGcm.Seal(nil, nonce, plaintext, nil)
+ if !skipBackup {
+ err = s.backupModelToHistory(modelFolder, changeReasonForHistory)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ }
+ f, err := os.Create(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile)))
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to write model",
+ })
+ return false
+ }
+ _, _ = f.Write(nonce)
+ _, _ = f.Write(ciphertext)
+ _ = f.Close()
+ return true
+}
+
+func (s *server) lockFolder(folderName string) {
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+ _, exists := s.locksByFolderName[folderName]
+ if !exists {
+ s.locksByFolderName[folderName] = &sync.Mutex{}
+ }
+ s.locksByFolderName[folderName].Lock()
+}
+
// unlockFolder releases and discards the per-folder mutex created by
// lockFolder.
// NOTE(review): the map read and delete here are NOT guarded by s.globalLock,
// so this races with concurrent lockFolder calls mutating the same map.
// Fixing it requires coordinating with lockFolder (which currently blocks on
// the folder mutex while holding the global lock), otherwise taking the
// global lock here would introduce a deadlock — confirm and fix both together.
func (s *server) unlockFolder(folderName string) {
	if _, exists := s.locksByFolderName[folderName]; exists {
		s.locksByFolderName[folderName].Unlock()
		delete(s.locksByFolderName, folderName)
	}
}
+
// backupModelToHistory copies the current model input file into the model's
// "history" sub-folder, stamping the backup name with the current time and the
// given change reason, then prunes the oldest backups beyond the configured
// retention count (BackupHistoryFilesToKeep).
func (s *server) backupModelToHistory(modelFolder string, changeReasonForHistory string) (err error) {
	historyFolder := filepath.Join(modelFolder, "history")
	if _, err := os.Stat(historyFolder); os.IsNotExist(err) {
		err = os.Mkdir(historyFolder, 0700)
		if err != nil {
			return err
		}
	}
	inputModel, err := os.ReadFile(filepath.Clean(filepath.Join(modelFolder, s.config.InputFile)))
	if err != nil {
		return err
	}
	// NOTE(review): two backups within the same second with the same change
	// reason collide on this name; WriteFile onto the existing 0400 file would
	// then fail — confirm this is acceptable
	historyFile := filepath.Join(historyFolder, time.Now().Format("2006-01-02 15:04:05")+" "+changeReasonForHistory+".backup")
	err = os.WriteFile(historyFile, inputModel, 0400)
	if err != nil {
		return err
	}
	// now delete any old files if over limit to keep
	files, err := os.ReadDir(historyFolder)
	if err != nil {
		return err
	}
	if len(files) > s.config.BackupHistoryFilesToKeep {
		requiredToDelete := len(files) - s.config.BackupHistoryFilesToKeep
		// names start with a timestamp, so lexicographic order is oldest-first
		sort.Slice(files, func(i, j int) bool {
			return files[i].Name() < files[j].Name()
		})
		for _, file := range files {
			requiredToDelete--
			// defensive check against suspicious directory entry names
			if file.Name() != filepath.Clean(file.Name()) {
				return fmt.Errorf("weird file name %v", file.Name())
			}
			err = os.Remove(filepath.Join(historyFolder, file.Name()))
			if err != nil {
				return err
			}
			if requiredToDelete <= 0 {
				break
			}
		}
	}
	return
}
+
// folderNameForModel returns the filesystem folder of the model with the
// given UUID below the key folder.
// (The parameter was renamed from "uuid" since that shadowed the imported
// github.com/google/uuid package inside this function.)
func folderNameForModel(folderNameOfKey string, modelUUID string) string {
	return filepath.Join(folderNameOfKey, modelUUID)
}
+
// argon2Params bundles the Argon2 tuning parameters used when deriving the
// symmetric model-encryption key in generateKeyFromAlreadyStrongRandomInput.
type argon2Params struct {
	memory      uint32 // Argon2 memory parameter (KiB, per x/crypto/argon2 docs)
	iterations  uint32 // number of passes over the memory
	parallelism uint8  // degree of parallelism (threads)
	saltLength  uint32 // number of input bytes consumed as salt
	keyLength   uint32 // length of the derived key in bytes
}
+
// generateKeyFromAlreadyStrongRandomInput stretches the (already
// cryptographically random) input into a keySize-byte key using Argon2id:
// the first saltLength bytes of the input serve as salt, the remainder as
// the password material.
func generateKeyFromAlreadyStrongRandomInput(alreadyRandomInput []byte) []byte {
	// Establish the parameters to use for Argon2.
	p := &argon2Params{
		memory:      64 * 1024,
		iterations:  3,
		parallelism: 2,
		saltLength:  16,
		keyLength:   keySize,
	}
	// As the input is already cryptographically secure random, the salt is simply the first n bytes
	salt := alreadyRandomInput[0:p.saltLength]
	hash := argon2.IDKey(alreadyRandomInput[p.saltLength:], salt, p.iterations, p.memory, p.parallelism, p.keyLength)
	return hash
}
+
// lowerCaseAndTrim normalizes all tags in place (lowercased, surrounding
// whitespace removed) and returns the same slice for convenient chaining.
func lowerCaseAndTrim(tags []string) []string {
	for i, tag := range tags {
		tags[i] = strings.TrimSpace(strings.ToLower(tag))
	}
	return tags
}
diff --git a/pkg/server/report.go b/pkg/server/report.go
new file mode 100644
index 00000000..f8349200
--- /dev/null
+++ b/pkg/server/report.go
@@ -0,0 +1,177 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+package server
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/gin-gonic/gin"
+)
+
// responseType enumerates the artifact kinds that streamResponse can render
// and return for a model.
type responseType int

const (
	dataFlowDiagram responseType = iota
	dataAssetDiagram
	reportPDF
	risksExcel
	tagsExcel
	risksJSON
	technicalAssetsJSON
	statsJSON
)
+
// streamDataFlowDiagram renders and streams the data-flow diagram PNG.
func (s *server) streamDataFlowDiagram(ginContext *gin.Context) {
	s.streamResponse(ginContext, dataFlowDiagram)
}

// streamDataAssetDiagram renders and streams the data-asset diagram PNG.
func (s *server) streamDataAssetDiagram(ginContext *gin.Context) {
	s.streamResponse(ginContext, dataAssetDiagram)
}

// streamReportPDF renders and streams the report PDF as a download.
func (s *server) streamReportPDF(ginContext *gin.Context) {
	s.streamResponse(ginContext, reportPDF)
}

// streamRisksExcel renders and streams the risks Excel sheet as a download.
func (s *server) streamRisksExcel(ginContext *gin.Context) {
	s.streamResponse(ginContext, risksExcel)
}

// streamTagsExcel renders and streams the tags Excel sheet as a download.
func (s *server) streamTagsExcel(ginContext *gin.Context) {
	s.streamResponse(ginContext, tagsExcel)
}

// streamRisksJSON renders and streams the risks JSON inline.
func (s *server) streamRisksJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, risksJSON)
}

// streamTechnicalAssetsJSON renders and streams the technical-assets JSON inline.
func (s *server) streamTechnicalAssetsJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, technicalAssetsJSON)
}

// streamStatsJSON renders and streams the stats JSON inline.
func (s *server) streamStatsJSON(ginContext *gin.Context) {
	s.streamResponse(ginContext, statsJSON)
}
+
+func (s *server) streamResponse(ginContext *gin.Context, responseType responseType) {
+ folderNameOfKey, key, ok := s.checkTokenToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.lockFolder(folderNameOfKey)
+ defer func() {
+ s.unlockFolder(folderNameOfKey)
+ var err error
+ if r := recover(); r != nil {
+ err = r.(error)
+ if s.config.Verbose {
+ log.Println(err)
+ }
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": strings.TrimSpace(err.Error()),
+ })
+ ok = false
+ }
+ }()
+ dpi, err := strconv.Atoi(ginContext.DefaultQuery("dpi", strconv.Itoa(s.config.GraphvizDPI)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ _, yamlText, ok := s.readModel(ginContext, ginContext.Param("model-id"), key, folderNameOfKey)
+ if !ok {
+ return
+ }
+ tmpModelFile, err := os.CreateTemp(s.config.TempFolder, "threagile-render-*")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ defer func() { _ = os.Remove(tmpModelFile.Name()) }()
+ tmpOutputDir, err := os.MkdirTemp(s.config.TempFolder, "threagile-render-")
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ defer func() { _ = os.RemoveAll(tmpOutputDir) }()
+ err = os.WriteFile(tmpModelFile.Name(), []byte(yamlText), 0400)
+ if responseType == dataFlowDiagram {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, true, false, false, false, false, false, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.File(filepath.Clean(filepath.Join(tmpOutputDir, s.config.DataFlowDiagramFilenamePNG)))
+ } else if responseType == dataAssetDiagram {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, true, false, false, false, false, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.File(filepath.Clean(filepath.Join(tmpOutputDir, s.config.DataAssetDiagramFilenamePNG)))
+ } else if responseType == reportPDF {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, true, false, false, false, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ReportFilename)), s.config.ReportFilename)
+ } else if responseType == risksExcel {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, true, false, false, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ExcelRisksFilename)), s.config.ExcelRisksFilename)
+ } else if responseType == tagsExcel {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, true, false, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.FileAttachment(filepath.Clean(filepath.Join(tmpOutputDir, s.config.ExcelTagsFilename)), s.config.ExcelTagsFilename)
+ } else if responseType == risksJSON {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, true, false, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonRisksFilename)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download
+ } else if responseType == technicalAssetsJSON {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, true, true, false, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonTechnicalAssetsFilename)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download
+ } else if responseType == statsJSON {
+ s.doItViaRuntimeCall(tmpModelFile.Name(), tmpOutputDir, false, false, false, false, false, false, false, true, dpi)
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ jsonData, err := os.ReadFile(filepath.Clean(filepath.Join(tmpOutputDir, s.config.JsonStatsFilename)))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.Data(http.StatusOK, "application/json", jsonData) // stream directly with JSON content-type in response instead of file download
+ }
+}
diff --git a/pkg/server/server.go b/pkg/server/server.go
new file mode 100644
index 00000000..2801a469
--- /dev/null
+++ b/pkg/server/server.go
@@ -0,0 +1,299 @@
+/*
+Copyright Š 2023 NAME HERE
+*/
+
+package server
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/threagile/threagile/pkg/common"
+ "github.com/threagile/threagile/pkg/model"
+
+ "github.com/gin-gonic/gin"
+ "github.com/threagile/threagile/pkg/docs"
+ "github.com/threagile/threagile/pkg/security/risks"
+ "github.com/threagile/threagile/pkg/security/types"
+)
+
// server carries the configuration and the in-memory state of the Threagile
// REST server: token/session bookkeeping, creation throttling and per-model
// folder locking.
type server struct {
	config                         *common.Config
	successCount                   int        // counter — presumably exposed via the /meta/stats endpoint; handler not visible here
	errorCount                     int        // counter — presumably exposed via the /meta/stats endpoint; handler not visible here
	globalLock                     sync.Mutex // guards locksByFolderName (see lockFolder)
	throttlerLock                  sync.Mutex // presumably guards createdObjectsThrottler; usage not visible in this chunk
	createdObjectsThrottler        map[string][]int64       // creation timestamps per key, for rate limiting — TODO confirm against the throttle code
	mapTokenHashToTimeoutStruct    map[string]timeoutStruct // active token hashes and their timeout data
	mapFolderNameToTokenHash       map[string]string        // reverse lookup: key folder name -> current token hash
	extremeShortTimeoutsForTesting bool                     // shortens token timeouts for tests; always false in RunServer
	locksByFolderName              map[string]*sync.Mutex   // per-key-folder mutexes, see lockFolder/unlockFolder
}
+
+func RunServer(config *common.Config) {
+ s := &server{
+ config: config,
+ createdObjectsThrottler: make(map[string][]int64),
+ mapTokenHashToTimeoutStruct: make(map[string]timeoutStruct),
+ mapFolderNameToTokenHash: make(map[string]string),
+ extremeShortTimeoutsForTesting: false,
+ locksByFolderName: make(map[string]*sync.Mutex),
+ }
+ router := gin.Default()
+ router.LoadHTMLGlob(filepath.Join(s.config.ServerFolder, "s", "static", "*.html")) // <==
+ router.GET("/", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "index.html", gin.H{})
+ })
+ router.HEAD("/", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "index.html", gin.H{})
+ })
+ router.StaticFile("/threagile.png", filepath.Join(s.config.ServerFolder, "s", "static", "threagile.png")) // <==
+ router.StaticFile("/site.webmanifest", filepath.Join(s.config.ServerFolder, "s", "static", "site.webmanifest"))
+ router.StaticFile("/favicon.ico", filepath.Join(s.config.ServerFolder, "s", "static", "favicon.ico"))
+ router.StaticFile("/favicon-32x32.png", filepath.Join(s.config.ServerFolder, "s", "static", "favicon-32x32.png"))
+ router.StaticFile("/favicon-16x16.png", filepath.Join(s.config.ServerFolder, "s", "static", "favicon-16x16.png"))
+ router.StaticFile("/apple-touch-icon.png", filepath.Join(s.config.ServerFolder, "s", "static", "apple-touch-icon.png"))
+ router.StaticFile("/android-chrome-512x512.png", filepath.Join(s.config.ServerFolder, "s", "static", "android-chrome-512x512.png"))
+ router.StaticFile("/android-chrome-192x192.png", filepath.Join(s.config.ServerFolder, "s", "static", "android-chrome-192x192.png"))
+
+ router.StaticFile("/schema.json", filepath.Join(s.config.AppFolder, "schema.json"))
+ router.StaticFile("/live-templates.txt", filepath.Join(s.config.AppFolder, "live-templates.txt"))
+ router.StaticFile("/openapi.yaml", filepath.Join(s.config.AppFolder, "openapi.yaml"))
+ router.StaticFile("/swagger-ui/", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/index.html"))
+ router.StaticFile("/swagger-ui/index.html", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/index.html"))
+ router.StaticFile("/swagger-ui/oauth2-redirect.html", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/oauth2-redirect.html"))
+ router.StaticFile("/swagger-ui/swagger-ui.css", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui.css"))
+ router.StaticFile("/swagger-ui/swagger-ui.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui.js"))
+ router.StaticFile("/swagger-ui/swagger-ui-bundle.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui-bundle.js"))
+ router.StaticFile("/swagger-ui/swagger-ui-standalone-preset.js", filepath.Join(s.config.ServerFolder, "s", "static", "swagger-ui/swagger-ui-standalone-preset.js")) // <==
+
+ router.GET("/threagile-example-model.yaml", s.exampleFile)
+ router.GET("/threagile-stub-model.yaml", s.stubFile)
+
+ router.GET("/meta/ping", func(c *gin.Context) {
+ c.JSON(200, gin.H{
+ "message": "pong",
+ })
+ })
+ router.GET("/meta/version", func(c *gin.Context) {
+ c.JSON(200, gin.H{
+ "version": docs.ThreagileVersion,
+ "build_timestamp": s.config.BuildTimestamp,
+ })
+ })
+ router.GET("/meta/types", func(c *gin.Context) {
+ c.JSON(200, gin.H{
+ "quantity": arrayOfStringValues(types.QuantityValues()),
+ "confidentiality": arrayOfStringValues(types.ConfidentialityValues()),
+ "criticality": arrayOfStringValues(types.CriticalityValues()),
+ "technical_asset_type": arrayOfStringValues(types.TechnicalAssetTypeValues()),
+ "technical_asset_size": arrayOfStringValues(types.TechnicalAssetSizeValues()),
+ "authorization": arrayOfStringValues(types.AuthorizationValues()),
+ "authentication": arrayOfStringValues(types.AuthenticationValues()),
+ "usage": arrayOfStringValues(types.UsageValues()),
+ "encryption": arrayOfStringValues(types.EncryptionStyleValues()),
+ "data_format": arrayOfStringValues(types.DataFormatValues()),
+ "protocol": arrayOfStringValues(types.ProtocolValues()),
+ "technical_asset_technology": arrayOfStringValues(types.TechnicalAssetTechnologyValues()),
+ "technical_asset_machine": arrayOfStringValues(types.TechnicalAssetMachineValues()),
+ "trust_boundary_type": arrayOfStringValues(types.TrustBoundaryTypeValues()),
+ "data_breach_probability": arrayOfStringValues(types.DataBreachProbabilityValues()),
+ "risk_severity": arrayOfStringValues(types.RiskSeverityValues()),
+ "risk_exploitation_likelihood": arrayOfStringValues(types.RiskExploitationLikelihoodValues()),
+ "risk_exploitation_impact": arrayOfStringValues(types.RiskExploitationImpactValues()),
+ "risk_function": arrayOfStringValues(types.RiskFunctionValues()),
+ "risk_status": arrayOfStringValues(types.RiskStatusValues()),
+ "stride": arrayOfStringValues(types.STRIDEValues()),
+ })
+ })
+
+ // TODO router.GET("/meta/risk-rules", listRiskRules)
+ // TODO router.GET("/meta/model-macros", listModelMacros)
+
+ router.GET("/meta/stats", s.stats)
+
+ router.POST("/direct/analyze", s.analyze)
+ router.POST("/direct/check", s.check)
+ router.GET("/direct/stub", s.stubFile)
+
+ router.POST("/auth/keys", s.createKey)
+ router.DELETE("/auth/keys", s.deleteKey)
+ router.POST("/auth/tokens", s.createToken)
+ router.DELETE("/auth/tokens", s.deleteToken)
+
+ router.POST("/models", s.createNewModel)
+ router.GET("/models", s.listModels)
+ router.DELETE("/models/:model-id", s.deleteModel)
+ router.GET("/models/:model-id", s.getModel)
+ router.PUT("/models/:model-id", s.importModel)
+ router.GET("/models/:model-id/data-flow-diagram", s.streamDataFlowDiagram)
+ router.GET("/models/:model-id/data-asset-diagram", s.streamDataAssetDiagram)
+ router.GET("/models/:model-id/report-pdf", s.streamReportPDF)
+ router.GET("/models/:model-id/risks-excel", s.streamRisksExcel)
+ router.GET("/models/:model-id/tags-excel", s.streamTagsExcel)
+ router.GET("/models/:model-id/risks", s.streamRisksJSON)
+ router.GET("/models/:model-id/technical-assets", s.streamTechnicalAssetsJSON)
+ router.GET("/models/:model-id/stats", s.streamStatsJSON)
+ router.GET("/models/:model-id/analysis", s.analyzeModelOnServerDirectly)
+
+ router.GET("/models/:model-id/cover", s.getCover)
+ router.PUT("/models/:model-id/cover", s.setCover)
+ router.GET("/models/:model-id/overview", s.getOverview)
+ router.PUT("/models/:model-id/overview", s.setOverview)
+ //router.GET("/models/:model-id/questions", getQuestions)
+ //router.PUT("/models/:model-id/questions", setQuestions)
+ router.GET("/models/:model-id/abuse-cases", s.getAbuseCases)
+ router.PUT("/models/:model-id/abuse-cases", s.setAbuseCases)
+ router.GET("/models/:model-id/security-requirements", s.getSecurityRequirements)
+ router.PUT("/models/:model-id/security-requirements", s.setSecurityRequirements)
+ //router.GET("/models/:model-id/tags", getTags)
+ //router.PUT("/models/:model-id/tags", setTags)
+
+ router.GET("/models/:model-id/data-assets", s.getDataAssets)
+ router.POST("/models/:model-id/data-assets", s.createNewDataAsset)
+ router.GET("/models/:model-id/data-assets/:data-asset-id", s.getDataAsset)
+ router.PUT("/models/:model-id/data-assets/:data-asset-id", s.setDataAsset)
+ router.DELETE("/models/:model-id/data-assets/:data-asset-id", s.deleteDataAsset)
+
+ router.GET("/models/:model-id/trust-boundaries", s.getTrustBoundaries)
+ // router.POST("/models/:model-id/trust-boundaries", createNewTrustBoundary)
+ // router.GET("/models/:model-id/trust-boundaries/:trust-boundary-id", getTrustBoundary)
+ // router.PUT("/models/:model-id/trust-boundaries/:trust-boundary-id", setTrustBoundary)
+ // router.DELETE("/models/:model-id/trust-boundaries/:trust-boundary-id", deleteTrustBoundary)
+
+ router.GET("/models/:model-id/shared-runtimes", s.getSharedRuntimes)
+ router.POST("/models/:model-id/shared-runtimes", s.createNewSharedRuntime)
+ router.GET("/models/:model-id/shared-runtimes/:shared-runtime-id", s.getSharedRuntime)
+ router.PUT("/models/:model-id/shared-runtimes/:shared-runtime-id", s.setSharedRuntime)
+ router.DELETE("/models/:model-id/shared-runtimes/:shared-runtime-id", s.deleteSharedRuntime)
+
+ reporter := common.DefaultProgressReporter{Verbose: s.config.Verbose}
+ s.customRiskRules = model.LoadCustomRiskRules(s.config.RiskRulesPlugins, reporter)
+
+ fmt.Println("Threagile s running...")
+ _ = router.Run(":" + strconv.Itoa(s.config.ServerPort)) // listen and serve on 0.0.0.0:8080 or whatever port was specified
+}
+
+// exampleFile serves the bundled example model YAML
+// (threagile-example-model.yaml from the configured app folder) verbatim.
+func (s *server) exampleFile(ginContext *gin.Context) {
+ example, err := os.ReadFile(filepath.Join(s.config.AppFolder, "threagile-example-model.yaml"))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.Data(http.StatusOK, gin.MIMEYAML, example)
+}
+
+// stubFile serves the stub model YAML (threagile-stub-model.yaml from the
+// configured app folder), with the "tags_available" list expanded to all
+// tags supported by the loaded risk rules.
+func (s *server) stubFile(ginContext *gin.Context) {
+ stub, err := os.ReadFile(filepath.Join(s.config.AppFolder, "threagile-stub-model.yaml"))
+ if err != nil {
+ handleErrorInServiceCall(err, ginContext)
+ return
+ }
+ ginContext.Data(http.StatusOK, gin.MIMEYAML, s.addSupportedTags(stub)) // TODO use also the MIMEYAML way of serving YAML in model export?
+}
+
+// addSupportedTags collects the distinct tags declared by all custom and
+// built-in risk rules (lower-cased, sorted) and expands the first
+// "tags_available:" marker in the given YAML input into a populated list.
+// The input is returned unchanged when no rule declares any tags.
+func (s *server) addSupportedTags(input []byte) []byte {
+ // add distinct tags as "tags_available"
+ supportedTags := make(map[string]bool)
+ for _, customRule := range s.customRiskRules {
+ for _, tag := range customRule.Tags {
+ supportedTags[strings.ToLower(tag)] = true
+ }
+ }
+
+ for _, rule := range risks.GetBuiltInRiskRules() {
+ for _, tag := range rule.SupportedTags() {
+ supportedTags[strings.ToLower(tag)] = true
+ }
+ }
+
+ // flatten the set into a deterministic (sorted) slice
+ tags := make([]string, 0, len(supportedTags))
+ for t := range supportedTags {
+ tags = append(tags, t)
+ }
+ if len(tags) == 0 {
+ return input
+ }
+ sort.Strings(tags)
+ if s.config.Verbose {
+ fmt.Print("Supported tags of all risk rules: ")
+ for i, tag := range tags {
+ if i > 0 {
+ fmt.Print(", ")
+ }
+ fmt.Print(tag)
+ }
+ fmt.Println()
+ }
+ replacement := "tags_available:"
+ for _, tag := range tags {
+ replacement += "\n - " + tag
+ }
+ // only the first occurrence of the marker is expanded
+ return []byte(strings.Replace(string(input), "tags_available:", replacement, 1))
+}
+
+// arrayOfStringValues converts a slice of TypeEnum values into their
+// string representations (used for the /meta/types listing).
+func arrayOfStringValues(values []types.TypeEnum) []string {
+ result := make([]string, 0)
+ for _, value := range values {
+ result = append(result, value.String())
+ }
+ return result
+}
+
+// stats reports rough usage statistics: the number of key folders and model
+// folders found on disk plus the in-memory success/error counters. Counting
+// is purely heuristic, based on directory-name lengths.
+func (s *server) stats(ginContext *gin.Context) {
+ keyCount, modelCount := 0, 0
+ keyFolders, err := os.ReadDir(filepath.Join(s.config.ServerFolder, s.config.KeyFolder))
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to collect stats",
+ })
+ return
+ }
+ for _, keyFolder := range keyFolders {
+ // NOTE(review): assumes key folders have 128-char (SHA-512 hex) names,
+ // but folderNameFromKey derives names via hashSHA256 — confirm the
+ // hash helper's hex output length actually matches 128 here.
+ if len(keyFolder.Name()) == 128 { // it's a sha512 token hash probably, so count it as token folder for the stats
+ keyCount++
+ if keyFolder.Name() != filepath.Clean(keyFolder.Name()) {
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "weird file path",
+ })
+ return
+ }
+ modelFolders, err := os.ReadDir(filepath.Join(s.config.ServerFolder, s.config.KeyFolder, keyFolder.Name()))
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to collect stats",
+ })
+ return
+ }
+ for _, modelFolder := range modelFolders {
+ if len(modelFolder.Name()) == 36 { // it's a uuid model folder probably, so count it as model folder for the stats
+ modelCount++
+ }
+ }
+ }
+ }
+ // TODO collect and deliver more stats (old model count?) and health info
+ ginContext.JSON(http.StatusOK, gin.H{
+ "key_count": keyCount,
+ "model_count": modelCount,
+ "success_count": s.successCount,
+ "error_count": s.errorCount,
+ })
+}
+
+// handleErrorInServiceCall logs the error and answers the request with a
+// 400 response carrying the trimmed error text.
+func handleErrorInServiceCall(err error, ginContext *gin.Context) {
+ log.Println(err)
+ ginContext.JSON(http.StatusBadRequest, gin.H{
+ "error": strings.TrimSpace(err.Error()),
+ })
+}
diff --git a/pkg/server/token.go b/pkg/server/token.go
new file mode 100644
index 00000000..d9acb2b6
--- /dev/null
+++ b/pkg/server/token.go
@@ -0,0 +1,297 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+package server
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/gin-gonic/gin"
+)
+
+const keySize = 32
+
+// keyHeader binds the "key" HTTP request header (base64url-encoded raw key).
+type keyHeader struct {
+ Key string `header:"key"`
+}
+
+// timeoutStruct tracks a token's xor pad and its creation / last-access
+// times (UnixNano) for timeout housekeeping.
+type timeoutStruct struct {
+ xorRand []byte
+ createdNanoTime, lastAccessedNanoTime int64
+}
+
+// createKey generates a fresh random key (rate-limited via the throttler),
+// creates its on-disk folder, and returns the key base64url-encoded. The
+// raw key itself is never stored server-side — only its hashed folder name.
+func (s *server) createKey(ginContext *gin.Context) {
+ ok := s.checkObjectCreationThrottler(ginContext, "KEY")
+ if !ok {
+ return
+ }
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+
+ keyBytesArr := make([]byte, keySize)
+ n, err := rand.Read(keyBytesArr[:])
+ if n != keySize || err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to create key",
+ })
+ return
+ }
+ err = os.MkdirAll(s.folderNameFromKey(keyBytesArr), 0700)
+ if err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to create key",
+ })
+ return
+ }
+ ginContext.JSON(http.StatusCreated, gin.H{
+ "key": base64.RawURLEncoding.EncodeToString(keyBytesArr[:]),
+ })
+}
+
+// checkObjectCreationThrottler enforces a denial-of-service guard of at most
+// 20 creations per object type within a 3-minute window. It returns true if
+// the current request is within the limit; otherwise it answers 429 and
+// returns false. The limit is keyed by the hashed type name only — the real
+// client IP is not available inside the containerized runtime, so the limit
+// is effectively global per type.
+func (s *server) checkObjectCreationThrottler(ginContext *gin.Context, typeName string) bool {
+ s.throttlerLock.Lock()
+ defer s.throttlerLock.Unlock()
+
+ // remove all elements older than 3 minutes (= 180000000000 ns)
+ now := time.Now().UnixNano()
+ cutoff := now - 180000000000
+ for keyCheck := range s.createdObjectsThrottler {
+ for i := 0; i < len(s.createdObjectsThrottler[keyCheck]); i++ {
+ if s.createdObjectsThrottler[keyCheck][i] < cutoff {
+ // Remove the element at index i from slice (safe while looping using i as iterator)
+ s.createdObjectsThrottler[keyCheck] = append(s.createdObjectsThrottler[keyCheck][:i], s.createdObjectsThrottler[keyCheck][i+1:]...)
+ i-- // Since we just deleted a[i], we must redo that index
+ }
+ }
+ length := len(s.createdObjectsThrottler[keyCheck])
+ if length == 0 {
+ delete(s.createdObjectsThrottler, keyCheck)
+ }
+ /*
+ if *verbose {
+ log.Println("Throttling count: "+strconv.Itoa(length))
+ }
+ */
+ }
+
+ // check current request
+ keyHash := hash(typeName) // getting the real client ip is not easy inside fully encapsulated containerized runtime
+ if _, ok := s.createdObjectsThrottler[keyHash]; !ok {
+ s.createdObjectsThrottler[keyHash] = make([]int64, 0)
+ }
+ // check the limit of 20 creations for this type per 3 minutes
+ withinLimit := len(s.createdObjectsThrottler[keyHash]) < 20
+ if withinLimit {
+ s.createdObjectsThrottler[keyHash] = append(s.createdObjectsThrottler[keyHash], now)
+ return true
+ }
+ ginContext.JSON(http.StatusTooManyRequests, gin.H{
+ "error": "object creation throttling exceeded (denial-of-service protection): please wait some time and try again",
+ })
+ return false
+}
+
+// deleteKey removes the entire on-disk folder of the key presented in the
+// "key" request header (including all models stored under it).
+func (s *server) deleteKey(ginContext *gin.Context) {
+ folderName, _, ok := s.checkKeyToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+ err := os.RemoveAll(folderName)
+ if err != nil {
+ log.Println("error during key delete: " + err.Error())
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "key not found",
+ })
+ return
+ }
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "key deleted",
+ })
+}
+
+// createToken issues a session token for the key in the "key" header. The
+// token is the key XOR-ed with a fresh random pad; the server keeps only the
+// pad plus the token's SHA-256 hash, so the key can be re-derived from a
+// presented token without ever being stored. Any previous token for the same
+// key folder is invalidated.
+func (s *server) createToken(ginContext *gin.Context) {
+ folderName, key, ok := s.checkKeyToFolderName(ginContext)
+ if !ok {
+ return
+ }
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+ if tokenHash, exists := s.mapFolderNameToTokenHash[folderName]; exists {
+ // invalidate previous token
+ delete(s.mapTokenHashToTimeoutStruct, tokenHash)
+ }
+ // create a strong random 256 bit value (used to xor)
+ xorBytesArr := make([]byte, keySize)
+ n, err := rand.Read(xorBytesArr[:])
+ if n != keySize || err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusInternalServerError, gin.H{
+ "error": "unable to create token",
+ })
+ return
+ }
+ now := time.Now().UnixNano()
+ token := xor(key, xorBytesArr)
+ tokenHash := hashSHA256(token)
+ s.housekeepingTokenMaps()
+ s.mapTokenHashToTimeoutStruct[tokenHash] = timeoutStruct{
+ xorRand: xorBytesArr,
+ createdNanoTime: now,
+ lastAccessedNanoTime: now,
+ }
+ s.mapFolderNameToTokenHash[folderName] = tokenHash
+ ginContext.JSON(http.StatusCreated, gin.H{
+ "token": base64.RawURLEncoding.EncodeToString(token[:]),
+ })
+}
+
+// tokenHeader binds the "token" HTTP request header (base64url-encoded token).
+type tokenHeader struct {
+ Token string `header:"token"`
+}
+
+// deleteToken invalidates the token presented in the "token" header by
+// removing its hash from both token maps. Malformed or missing tokens
+// answer 404.
+func (s *server) deleteToken(ginContext *gin.Context) {
+ header := tokenHeader{}
+ if err := ginContext.ShouldBindHeader(&header); err != nil {
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return
+ }
+ token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token))
+ if len(token) == 0 || err != nil {
+ if err != nil {
+ log.Println(err)
+ }
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return
+ }
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+ s.deleteTokenHashFromMaps(hashSHA256(token))
+ ginContext.JSON(http.StatusOK, gin.H{
+ "message": "token deleted",
+ })
+}
+
+// checkKeyToFolderName resolves the "key" request header to the key's
+// on-disk folder. It decodes the base64url key, derives the folder name and
+// verifies the folder exists. On any failure it answers 404 ("key not
+// found", deliberately uniform to avoid leaking which step failed) and
+// returns ok=false.
+func (s *server) checkKeyToFolderName(ginContext *gin.Context) (folderNameOfKey string, key []byte, ok bool) {
+ header := keyHeader{}
+ if err := ginContext.ShouldBindHeader(&header); err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "key not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ key, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Key))
+ if len(key) == 0 || err != nil {
+ if err != nil {
+ log.Println(err)
+ }
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "key not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ folderNameOfKey = s.folderNameFromKey(key)
+ if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) {
+ log.Println(err)
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "key not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ return folderNameOfKey, key, true
+}
+
+// checkTokenToFolderName resolves the "token" request header to the key
+// folder it belongs to. It decodes the token, looks up its xor pad by the
+// token's SHA-256 hash, re-derives the key, verifies the key folder exists,
+// and refreshes the token's last-access timestamp. On any failure it answers
+// 404 ("token not found") and returns ok=false.
+func (s *server) checkTokenToFolderName(ginContext *gin.Context) (folderNameOfKey string, key []byte, ok bool) {
+ header := tokenHeader{}
+ if err := ginContext.ShouldBindHeader(&header); err != nil {
+ log.Println(err)
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ token, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(header.Token))
+ if len(token) == 0 || err != nil {
+ if err != nil {
+ log.Println(err)
+ }
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ s.globalLock.Lock()
+ defer s.globalLock.Unlock()
+ s.housekeepingTokenMaps() // to remove timed-out ones
+ tokenHash := hashSHA256(token)
+ entry, exists := s.mapTokenHashToTimeoutStruct[tokenHash]
+ if !exists {
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ // re-create the key from token
+ key = xor(token, entry.xorRand)
+ folderNameOfKey = s.folderNameFromKey(key)
+ if _, err := os.Stat(folderNameOfKey); os.IsNotExist(err) {
+ log.Println(err)
+ ginContext.JSON(http.StatusNotFound, gin.H{
+ "error": "token not found",
+ })
+ return folderNameOfKey, key, false
+ }
+ // BUG FIX: the map value is a struct copy — mutating only the local copy
+ // (as the previous code did) never persisted the refreshed last-access
+ // time, so actively-used tokens could still hit the soft timeout in
+ // housekeepingTokenMaps. Write the updated entry back into the map.
+ entry.lastAccessedNanoTime = time.Now().UnixNano()
+ s.mapTokenHashToTimeoutStruct[tokenHash] = entry
+ return folderNameOfKey, key, true
+}
+
+// folderNameFromKey maps a raw key to its on-disk storage folder:
+// <server-folder>/<key-folder>/<hash of key>.
+func (s *server) folderNameFromKey(key []byte) string {
+ // NOTE(review): the local was previously named sha512Hash although
+ // hashSHA256 is called — renamed to keyHash to stop the name lying.
+ // The stats endpoint counts key folders by a 128-char name (SHA-512 hex
+ // length); confirm hashSHA256's hex output length matches that check.
+ keyHash := hashSHA256(key)
+ return filepath.Join(s.config.ServerFolder, s.config.KeyFolder, keyHash)
+}
+
+// housekeepingTokenMaps expires tokens: normally after 30 minutes of
+// inactivity (soft) or 10 hours since creation (hard); with
+// extremeShortTimeoutsForTesting the limits shrink to 1 / 3 minutes.
+// Deleting map entries while ranging is safe in Go.
+func (s *server) housekeepingTokenMaps() {
+ now := time.Now().UnixNano()
+ for tokenHash, val := range s.mapTokenHashToTimeoutStruct {
+ if s.extremeShortTimeoutsForTesting {
+ // remove all elements older than 1 minute (= 60000000000 ns) soft
+ // and all elements older than 3 minutes (= 180000000000 ns) hard
+ if now-val.lastAccessedNanoTime > 60000000000 || now-val.createdNanoTime > 180000000000 {
+ fmt.Println("About to remove a token hash from maps")
+ s.deleteTokenHashFromMaps(tokenHash)
+ }
+ } else {
+ // remove all elements older than 30 minutes (= 1800000000000 ns) soft
+ // and all elements older than 10 hours (= 36000000000000 ns) hard
+ if now-val.lastAccessedNanoTime > 1800000000000 || now-val.createdNanoTime > 36000000000000 {
+ s.deleteTokenHashFromMaps(tokenHash)
+ }
+ }
+ }
+}
+
+// deleteTokenHashFromMaps removes a token hash from the timeout map and from
+// the folder-name reverse mapping. At most one folder can reference a given
+// token hash, hence the break after the first match.
+func (s *server) deleteTokenHashFromMaps(tokenHash string) {
+ delete(s.mapTokenHashToTimeoutStruct, tokenHash)
+ for folderName, check := range s.mapFolderNameToTokenHash {
+ if check == tokenHash {
+ delete(s.mapFolderNameToTokenHash, folderName)
+ break
+ }
+ }
+}
diff --git a/pkg/server/zip.go b/pkg/server/zip.go
new file mode 100644
index 00000000..32401189
--- /dev/null
+++ b/pkg/server/zip.go
@@ -0,0 +1,125 @@
+/*
+Copyright © 2023 NAME HERE
+*/
+package server
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// ZipFiles compresses one or many files into a single zip archive file.
+// Param 1: filename is the output zip file's name.
+// Param 2: files is a list of files to add to the zip.
+// ZipFiles compresses one or many files into a single zip archive file.
+// Param 1: filename is the output zip file's name.
+// Param 2: files is a list of files to add to the zip.
+// Returns the first error encountered; the deferred Close calls flush the
+// archive on success (their errors are deliberately ignored).
+func zipFiles(filename string, files []string) error {
+ newZipFile, err := os.Create(filepath.Clean(filename))
+ if err != nil {
+ return err
+ }
+ defer func() { _ = newZipFile.Close() }()
+
+ zipWriter := zip.NewWriter(newZipFile)
+ defer func() { _ = zipWriter.Close() }()
+
+ // Add files to zip
+ for _, file := range files {
+ if err = addFileToZip(zipWriter, file); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Unzip will decompress a zip archive, moving all files and folders
+// within the zip file (parameter 1) to an output directory (parameter 2).
+// Unzip will decompress a zip archive, moving all files and folders
+// within the zip file (parameter 1) to an output directory (parameter 2).
+// It rejects entries whose cleaned path escapes dest (ZipSlip) and copies
+// at most the declared size per entry via io.CopyN.
+func unzip(src string, dest string) ([]string, error) {
+ var filenames []string
+
+ r, err := zip.OpenReader(src)
+ if err != nil {
+ return filenames, err
+ }
+ defer func() { _ = r.Close() }()
+
+ for _, f := range r.File {
+ // Store filename/path for returning and using later on
+ path := filepath.Clean(filepath.Join(dest, filepath.Clean(f.Name)))
+ // Check for ZipSlip. More Info: http://bit.ly/2MsjAWE
+ if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) {
+ return filenames, fmt.Errorf("%s: illegal file path", path)
+ }
+ filenames = append(filenames, path)
+ if f.FileInfo().IsDir() {
+ // Make Folder
+ _ = os.MkdirAll(path, os.ModePerm)
+ continue
+ }
+ // Make File
+ if err = os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
+ return filenames, err
+ }
+ // NOTE(review): this check can never trigger — path was just built
+ // via filepath.Clean above, so it always equals its cleaned form.
+ if path != filepath.Clean(path) {
+ return filenames, fmt.Errorf("weird file path %v", path)
+ }
+ outFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+ if err != nil {
+ return filenames, err
+ }
+
+ // empty entries: nothing to copy, just create/truncate the file
+ if f.FileInfo().Size() == 0 {
+ _ = outFile.Close()
+ continue
+ }
+
+ rc, err := f.Open()
+ if err != nil {
+ return filenames, err
+ }
+ _, err = io.CopyN(outFile, rc, f.FileInfo().Size())
+ // Close the file without defer to close before next iteration of loop
+ _ = outFile.Close()
+ _ = rc.Close()
+ if err != nil {
+ return filenames, err
+ }
+ }
+ return filenames, nil
+}
+
+// addFileToZip writes one file into the given zip writer, using the file's
+// basename as entry name and Deflate compression.
+func addFileToZip(zipWriter *zip.Writer, filename string) error {
+ fileToZip, err := os.Open(filepath.Clean(filename))
+ if err != nil {
+ return err
+ }
+ defer func() { _ = fileToZip.Close() }()
+
+ // Get the file information
+ info, err := fileToZip.Stat()
+ if err != nil {
+ return err
+ }
+
+ header, err := zip.FileInfoHeader(info)
+ if err != nil {
+ return err
+ }
+
+ // Using FileInfoHeader() above only uses the basename of the file. If we want
+ // to preserve the folder structure we can overwrite this with the full path.
+ //header.Name = filename
+
+ // Change to deflate to gain better compression
+ // see http://golang.org/pkg/archive/zip/#pkg-constants
+ header.Method = zip.Deflate
+
+ writer, err := zipWriter.CreateHeader(header)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(writer, fileToZip)
+ return err
+}
diff --git a/raa/dummy/dummy.go b/raa/dummy/dummy.go
deleted file mode 100644
index febfaf9a..00000000
--- a/raa/dummy/dummy.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-import (
- "fmt"
- "github.com/threagile/threagile/model"
- "math/rand"
-)
-
-// JUST A DUMMY TO HAVE AN ALTERNATIVE PLUGIN TO USE/TEST
-
-// used from plugin caller:
-func CalculateRAA() string {
- for techAssetID, techAsset := range model.ParsedModelRoot.TechnicalAssets {
- techAsset.RAA = float64(rand.Intn(100))
- fmt.Println("Using dummy RAA random calculation (just to test the usage of other shared object files as plugins)")
- model.ParsedModelRoot.TechnicalAssets[techAssetID] = techAsset
- }
- // return intro text (for reporting etc., can be short summary-like)
- return "Just some dummy algorithm implementation for demo purposes of pluggability..."
-}
diff --git a/report/json.go b/report/json.go
deleted file mode 100644
index bd4d8009..00000000
--- a/report/json.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package report
-
-import (
- "encoding/json"
- "github.com/threagile/threagile/model"
- "io/ioutil"
-)
-
-func WriteRisksJSON(filename string) {
- /*
- remainingRisks := make([]model.Risk, 0)
- for _, category := range model.SortedRiskCategories() {
- risks := model.SortedRisksOfCategory(category)
- for _, risk := range model.ReduceToOnlyStillAtRisk(risks) {
- remainingRisks = append(remainingRisks, risk)
- }
- }
- */
- jsonBytes, err := json.Marshal(model.AllRisks())
- if err != nil {
- panic(err)
- }
- err = ioutil.WriteFile(filename, jsonBytes, 0644)
- if err != nil {
- panic(err)
- }
-}
-
-// TODO: also a "data assets" json?
-func WriteTechnicalAssetsJSON(filename string) {
- jsonBytes, err := json.Marshal(model.ParsedModelRoot.TechnicalAssets)
- if err != nil {
- panic(err)
- }
- err = ioutil.WriteFile(filename, jsonBytes, 0644)
- if err != nil {
- panic(err)
- }
-}
-
-func WriteStatsJSON(filename string) {
- jsonBytes, err := json.Marshal(model.OverallRiskStatistics())
- if err != nil {
- panic(err)
- }
- err = ioutil.WriteFile(filename, jsonBytes, 0644)
- if err != nil {
- panic(err)
- }
-}
diff --git a/report/report.go b/report/report.go
deleted file mode 100644
index 39d3bdff..00000000
--- a/report/report.go
+++ /dev/null
@@ -1,5878 +0,0 @@
-package report
-
-import (
- "errors"
- "fmt"
- "github.com/jung-kurt/gofpdf"
- "github.com/jung-kurt/gofpdf/contrib/gofpdi"
- "github.com/threagile/threagile/colors"
- "github.com/threagile/threagile/model"
- "github.com/threagile/threagile/risks/built-in/accidental-secret-leak"
- "github.com/threagile/threagile/risks/built-in/code-backdooring"
- "github.com/threagile/threagile/risks/built-in/container-baseimage-backdooring"
- "github.com/threagile/threagile/risks/built-in/container-platform-escape"
- "github.com/threagile/threagile/risks/built-in/cross-site-request-forgery"
- "github.com/threagile/threagile/risks/built-in/cross-site-scripting"
- "github.com/threagile/threagile/risks/built-in/dos-risky-access-across-trust-boundary"
- "github.com/threagile/threagile/risks/built-in/incomplete-model"
- "github.com/threagile/threagile/risks/built-in/ldap-injection"
- "github.com/threagile/threagile/risks/built-in/missing-authentication"
- "github.com/threagile/threagile/risks/built-in/missing-authentication-second-factor"
- "github.com/threagile/threagile/risks/built-in/missing-build-infrastructure"
- "github.com/threagile/threagile/risks/built-in/missing-cloud-hardening"
- "github.com/threagile/threagile/risks/built-in/missing-file-validation"
- "github.com/threagile/threagile/risks/built-in/missing-hardening"
- "github.com/threagile/threagile/risks/built-in/missing-identity-propagation"
- "github.com/threagile/threagile/risks/built-in/missing-identity-provider-isolation"
- "github.com/threagile/threagile/risks/built-in/missing-identity-store"
- "github.com/threagile/threagile/risks/built-in/missing-network-segmentation"
- "github.com/threagile/threagile/risks/built-in/missing-vault"
- "github.com/threagile/threagile/risks/built-in/missing-vault-isolation"
- "github.com/threagile/threagile/risks/built-in/missing-waf"
- "github.com/threagile/threagile/risks/built-in/mixed-targets-on-shared-runtime"
- "github.com/threagile/threagile/risks/built-in/path-traversal"
- "github.com/threagile/threagile/risks/built-in/push-instead-of-pull-deployment"
- "github.com/threagile/threagile/risks/built-in/search-query-injection"
- "github.com/threagile/threagile/risks/built-in/server-side-request-forgery"
- "github.com/threagile/threagile/risks/built-in/service-registry-poisoning"
- "github.com/threagile/threagile/risks/built-in/sql-nosql-injection"
- "github.com/threagile/threagile/risks/built-in/unchecked-deployment"
- "github.com/threagile/threagile/risks/built-in/unencrypted-asset"
- "github.com/threagile/threagile/risks/built-in/unencrypted-communication"
- "github.com/threagile/threagile/risks/built-in/unguarded-access-from-internet"
- "github.com/threagile/threagile/risks/built-in/unguarded-direct-datastore-access"
- "github.com/threagile/threagile/risks/built-in/unnecessary-communication-link"
- "github.com/threagile/threagile/risks/built-in/unnecessary-data-asset"
- "github.com/threagile/threagile/risks/built-in/unnecessary-data-transfer"
- "github.com/threagile/threagile/risks/built-in/unnecessary-technical-asset"
- "github.com/threagile/threagile/risks/built-in/untrusted-deserialization"
- "github.com/threagile/threagile/risks/built-in/wrong-communication-link-content"
- "github.com/threagile/threagile/risks/built-in/wrong-trust-boundary-content"
- "github.com/threagile/threagile/risks/built-in/xml-external-entity"
- "github.com/wcharczuk/go-chart"
- "github.com/wcharczuk/go-chart/drawing"
- "image"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-const fontSizeHeadline, fontSizeHeadlineSmall, fontSizeBody, fontSizeSmall, fontSizeVerySmall = 20, 16, 12, 9, 7
-const /*dataFlowDiagramFullscreen,*/ allowedPdfLandscapePages, embedDiagramLegendPage = /*false,*/ true, false
-
-var isLandscapePage bool
-
-var pdf *gofpdf.Fpdf
-var alreadyTemplateImported = false
-var coverTemplateId, contentTemplateId, diagramLegendTemplateId int
-var pageNo int
-var linkCounter int
-var tocLinkIdByAssetId map[string]int
-var homeLink int
-var currentChapterTitleBreadcrumb string
-
-var firstParagraphRegEx = regexp.MustCompile(`(.*?)((
)|())`)
-
-func initReport() {
- pdf = nil
- isLandscapePage = false
- pageNo = 0
- linkCounter = 0
- homeLink = 0
- currentChapterTitleBreadcrumb = ""
- tocLinkIdByAssetId = make(map[string]int)
-}
-
-func WriteReportPDF(reportFilename string,
- templateFilename string,
- dataFlowDiagramFilenamePNG string,
- dataAssetDiagramFilenamePNG string,
- modelFilename string,
- skipRiskRules string,
- buildTimestamp string,
- modelHash string,
- introTextRAA string, customRiskRules map[string]model.CustomRiskRule) {
- initReport()
- createPdfAndInitMetadata()
- parseBackgroundTemplate(templateFilename)
- createCover()
- createTableOfContents()
- createManagementSummary()
- createImpactInitialRisks()
- createRiskMitigationStatus()
- createImpactRemainingRisks()
- createTargetDescription(filepath.Dir(modelFilename))
- embedDataFlowDiagram(dataFlowDiagramFilenamePNG)
- createSecurityRequirements()
- createAbuseCases()
- createTagListing()
- createSTRIDE()
- createAssignmentByFunction()
- createRAA(introTextRAA)
- embedDataRiskMapping(dataAssetDiagramFilenamePNG)
- //createDataRiskQuickWins()
- createOutOfScopeAssets()
- createModelFailures()
- createQuestions()
- createRiskCategories()
- createTechnicalAssets()
- createDataAssets()
- createTrustBoundaries()
- createSharedRuntimes()
- createRiskRulesChecked(modelFilename, skipRiskRules, buildTimestamp, modelHash, customRiskRules)
- createDisclaimer()
- writeReportToFile(reportFilename)
-}
-
-func checkErr(err error) {
- if err != nil {
- panic(err)
- }
-}
-
-func createPdfAndInitMetadata() {
- pdf = gofpdf.New("P", "mm", "A4", "")
- pdf.SetCreator(model.ParsedModelRoot.Author.Homepage, true)
- pdf.SetAuthor(model.ParsedModelRoot.Author.Name, true)
- pdf.SetTitle("Threat Model Report: "+model.ParsedModelRoot.Title, true)
- pdf.SetSubject("Threat Model Report: "+model.ParsedModelRoot.Title, true)
- // pdf.SetPageBox("crop", 0, 0, 100, 010)
- pdf.SetHeaderFunc(headerFunc)
- pdf.SetFooterFunc(footerFunc)
- linkCounter = 1 // link counting starts at 1 via pdf.AddLink
-}
-
-func headerFunc() {
- if !isLandscapePage {
- gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300)
- pdf.SetTopMargin(35)
- }
-}
-
-func footerFunc() {
- addBreadcrumb()
- pdf.SetFont("Helvetica", "", 10)
- pdf.SetTextColor(127, 127, 127)
- pdf.Text(8.6, 284, "Threat Model Report via Threagile") //: "+model.ParsedModelRoot.Title)
- pdf.Link(8.4, 281, 54.6, 4, homeLink)
- pageNo++
- text := "Page " + strconv.Itoa(pageNo)
- if pageNo < 10 {
- text = " " + text
- } else if pageNo < 100 {
- text = " " + text
- }
- if pageNo > 1 {
- pdf.Text(186, 284, text)
- }
-}
-
-func addBreadcrumb() {
- if len(currentChapterTitleBreadcrumb) > 0 {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetFont("Helvetica", "", 10)
- pdf.SetTextColor(127, 127, 127)
- pdf.Text(46.7, 24.5, uni(currentChapterTitleBreadcrumb+" - "+model.ParsedModelRoot.Title))
- }
-}
-
-func parseBackgroundTemplate(templateFilename string) {
- /*
- imageBox, err := rice.FindBox("template")
- checkErr(err)
- file, err := ioutil.TempFile("", "background-*-.pdf")
- checkErr(err)
- defer os.Remove(file.Name())
- backgroundBytes := imageBox.MustBytes("background.pdf")
- err = ioutil.WriteFile(file.Name(), backgroundBytes, 0644)
- checkErr(err)
- */
- coverTemplateId = gofpdi.ImportPage(pdf, templateFilename, 1, "/MediaBox")
- contentTemplateId = gofpdi.ImportPage(pdf, templateFilename, 2, "/MediaBox")
- diagramLegendTemplateId = gofpdi.ImportPage(pdf, templateFilename, 3, "/MediaBox")
-}
-
-func createCover() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.AddPage()
- gofpdi.UseImportedTemplate(pdf, coverTemplateId, 0, 0, 0, 300)
- pdf.SetFont("Helvetica", "B", 28)
- pdf.SetTextColor(0, 0, 0)
- pdf.Text(40, 110, "Threat Model Report")
- pdf.Text(40, 125, uni(model.ParsedModelRoot.Title))
- pdf.SetFont("Helvetica", "", 12)
- reportDate := model.ParsedModelRoot.Date
- if reportDate.IsZero() {
- reportDate = time.Now()
- }
- pdf.Text(40.7, 145, reportDate.Format("2 January 2006"))
- pdf.Text(40.7, 153, uni(model.ParsedModelRoot.Author.Name))
- pdf.SetFont("Helvetica", "", 10)
- pdf.SetTextColor(80, 80, 80)
- pdf.Text(8.6, 275, model.ParsedModelRoot.Author.Homepage)
- pdf.SetFont("Helvetica", "", 12)
- pdf.SetTextColor(0, 0, 0)
-}
-
-func createTableOfContents() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.AddPage()
- currentChapterTitleBreadcrumb = "Table of Contents"
- homeLink = pdf.AddLink()
- defineLinkTarget("{home}")
- gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300)
- pdf.SetFont("Helvetica", "B", fontSizeHeadline)
- pdf.Text(11, 40, "Table of Contents")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetY(46)
-
- pdf.SetLineWidth(0.25)
- pdf.SetDrawColor(160, 160, 160)
- pdf.SetDashPattern([]float64{0.5, 0.5}, 0)
-
- // ===============
-
- var y float64 = 50
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Text(11, y, "Results Overview")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- y += 6
- pdf.Text(11, y, " "+"Management Summary")
- pdf.Text(175, y, "{management-summary}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- risks := "Risks"
- catStr := "Categories"
- count, catCount := model.TotalRiskCount(), len(model.GeneratedRisksByCategory)
- if count == 1 {
- risks = "Risk"
- }
- if catCount == 1 {
- catStr = "Category"
- }
- y += 6
- pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Initial "+risks+" in "+strconv.Itoa(catCount)+" "+catStr)
- pdf.Text(175, y, "{impact-analysis-initial-risks}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Risk Mitigation")
- pdf.Text(175, y, "{risk-mitigation-status}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- risks = "Risks"
- catStr = "Categories"
- count, catCount = len(model.FilteredByStillAtRisk()), len(model.CategoriesOfOnlyRisksStillAtRisk(model.GeneratedRisksByCategory))
- if count == 1 {
- risks = "Risk"
- }
- if catCount == 1 {
- catStr = "Category"
- }
- pdf.Text(11, y, " "+"Impact Analysis of "+strconv.Itoa(count)+" Remaining "+risks+" in "+strconv.Itoa(catCount)+" "+catStr)
- pdf.Text(175, y, "{impact-analysis-remaining-risks}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Application Overview")
- pdf.Text(175, y, "{target-overview}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Data-Flow Diagram")
- pdf.Text(175, y, "{data-flow-diagram}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Security Requirements")
- pdf.Text(175, y, "{security-requirements}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Abuse Cases")
- pdf.Text(175, y, "{abuse-cases}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Tag Listing")
- pdf.Text(175, y, "{tag-listing}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"STRIDE Classification of Identified Risks")
- pdf.Text(175, y, "{stride}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Assignment by Function")
- pdf.Text(175, y, "{function-assignment}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"RAA Analysis")
- pdf.Text(175, y, "{raa-analysis}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- pdf.Text(11, y, " "+"Data Mapping")
- pdf.Text(175, y, "{data-risk-mapping}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- /*
- y += 6
- assets := "assets"
- count = len(model.SortedTechnicalAssetsByQuickWinsAndTitle())
- if count == 1 {
- assets = "asset"
- }
- pdf.Text(11, y, " "+"Data Risk Quick Wins: "+strconv.Itoa(count)+" "+assets)
- pdf.Text(175, y, "{data-risk-quick-wins}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- */
-
- y += 6
- assets := "Assets"
- count = len(model.OutOfScopeTechnicalAssets())
- if count == 1 {
- assets = "Asset"
- }
- pdf.Text(11, y, " "+"Out-of-Scope Assets: "+strconv.Itoa(count)+" "+assets)
- pdf.Text(175, y, "{out-of-scope-assets}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- modelFailures := model.FlattenRiskSlice(model.FilterByModelFailures(model.GeneratedRisksByCategory))
- risks = "Risks"
- count = len(modelFailures)
- if count == 1 {
- risks = "Risk"
- }
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(modelFailures))
- if countStillAtRisk > 0 {
- colors.ColorModelFailure(pdf)
- }
- pdf.Text(11, y, " "+"Potential Model Failures: "+strconv.Itoa(countStillAtRisk)+" / "+strconv.Itoa(count)+" "+risks)
- pdf.Text(175, y, "{model-failures}")
- pdfColorBlack()
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- y += 6
- questions := "Questions"
- count = len(model.ParsedModelRoot.Questions)
- if count == 1 {
- questions = "Question"
- }
- if model.QuestionsUnanswered() > 0 {
- colors.ColorModelFailure(pdf)
- }
- pdf.Text(11, y, " "+"Questions: "+strconv.Itoa(model.QuestionsUnanswered())+" / "+strconv.Itoa(count)+" "+questions)
- pdf.Text(175, y, "{questions}")
- pdfColorBlack()
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
-
- // ===============
-
- if len(model.GeneratedRisksByCategory) > 0 {
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- pdf.Text(11, y, "Risks by Vulnerability Category")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- y += 6
- pdf.Text(11, y, " "+"Identified Risks by Vulnerability Category")
- pdf.Text(175, y, "{intro-risks-by-vulnerability-category}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- for _, category := range model.SortedRiskCategories() {
- risks := model.SortedRisksOfCategory(category)
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- pdf.Text(11, y, " "+uni(category.Title)+": "+suffix)
- pdf.Text(175, y, "{"+category.Id+"}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- tocLinkIdByAssetId[category.Id] = pdf.AddLink()
- pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[category.Id])
- }
- }
-
- // ===============
-
- if len(model.ParsedModelRoot.TechnicalAssets) > 0 {
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- pdf.Text(11, y, "Risks by Technical Asset")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- y += 6
- pdf.Text(11, y, " "+"Identified Risks by Technical Asset")
- pdf.Text(175, y, "{intro-risks-by-technical-asset}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- for _, technicalAsset := range model.SortedTechnicalAssetsByRiskSeverityAndTitle() {
- risks := technicalAsset.GeneratedRisks()
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- if technicalAsset.OutOfScope {
- pdfColorOutOfScope()
- suffix = "out-of-scope"
- } else {
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
- }
- pdf.Text(11, y, " "+uni(technicalAsset.Title)+": "+suffix)
- pdf.Text(175, y, "{"+technicalAsset.Id+"}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- tocLinkIdByAssetId[technicalAsset.Id] = pdf.AddLink()
- pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[technicalAsset.Id])
- }
- }
-
- // ===============
-
- if len(model.ParsedModelRoot.DataAssets) > 0 {
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdfColorBlack()
- pdf.Text(11, y, "Data Breach Probabilities by Data Asset")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- y += 6
- pdf.Text(11, y, " "+"Identified Data Breach Probabilities by Data Asset")
- pdf.Text(175, y, "{intro-risks-by-data-asset}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- for _, dataAsset := range model.SortedDataAssetsByDataBreachProbabilityAndTitle() {
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- risks := dataAsset.IdentifiedDataBreachProbabilityRisks()
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() {
- case model.Probable:
- colors.ColorHighRisk(pdf)
- case model.Possible:
- colors.ColorMediumRisk(pdf)
- case model.Improbable:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if !dataAsset.IsDataBreachPotentialStillAtRisk() {
- pdfColorBlack()
- }
- pdf.Text(11, y, " "+uni(dataAsset.Title)+": "+suffix)
- pdf.Text(175, y, "{data:"+dataAsset.Id+"}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- tocLinkIdByAssetId[dataAsset.Id] = pdf.AddLink()
- pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[dataAsset.Id])
- }
- }
-
- // ===============
-
- if len(model.ParsedModelRoot.TrustBoundaries) > 0 {
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdfColorBlack()
- pdf.Text(11, y, "Trust Boundaries")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- for _, key := range model.SortedKeysOfTrustBoundaries() {
- trustBoundary := model.ParsedModelRoot.TrustBoundaries[key]
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- colors.ColorTwilight(pdf)
- if !trustBoundary.Type.IsNetworkBoundary() {
- pdfColorLightGray()
- }
- pdf.Text(11, y, " "+uni(trustBoundary.Title))
- pdf.Text(175, y, "{boundary:"+trustBoundary.Id+"}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- tocLinkIdByAssetId[trustBoundary.Id] = pdf.AddLink()
- pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[trustBoundary.Id])
- }
- pdfColorBlack()
- }
-
- // ===============
-
- if len(model.ParsedModelRoot.SharedRuntimes) > 0 {
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdfColorBlack()
- pdf.Text(11, y, "Shared Runtime")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- for _, key := range model.SortedKeysOfSharedRuntime() {
- sharedRuntime := model.ParsedModelRoot.SharedRuntimes[key]
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- pdf.Text(11, y, " "+uni(sharedRuntime.Title))
- pdf.Text(175, y, "{runtime:"+sharedRuntime.Id+"}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- tocLinkIdByAssetId[sharedRuntime.Id] = pdf.AddLink()
- pdf.Link(10, y-5, 172.5, 6.5, tocLinkIdByAssetId[sharedRuntime.Id])
- }
- }
-
- // ===============
-
- y += 6
- y += 6
- if y > 260 { // 260 instead of 275 for major group headlines to avoid "Schusterjungen"
- pageBreakInLists()
- y = 40
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Text(11, y, "About Threagile")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- pdf.Text(11, y, " "+"Risk Rules Checked by Threagile")
- pdf.Text(175, y, "{risk-rules-checked}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- y += 6
- if y > 275 {
- pageBreakInLists()
- y = 40
- }
- pdfColorDisclaimer()
- pdf.Text(11, y, " "+"Disclaimer")
- pdf.Text(175, y, "{disclaimer}")
- pdf.Line(15.6, y+1.3, 11+171.5, y+1.3)
- pdf.Link(10, y-5, 172.5, 6.5, pdf.AddLink())
- pdfColorBlack()
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-
- // Now write all the sections/pages. Before we start writing, we use `RegisterAlias` to
- // ensure that the alias written in the table of contents will be replaced
- // by the current page number. --> See the "pdf.RegisterAlias()" calls during the PDF creation in this file
-}
-
-func defineLinkTarget(alias string) {
- pageNumbStr := strconv.Itoa(pdf.PageNo())
- if len(pageNumbStr) == 1 {
- pageNumbStr = " " + pageNumbStr
- } else if len(pageNumbStr) == 2 {
- pageNumbStr = " " + pageNumbStr
- }
- pdf.RegisterAlias(alias, pageNumbStr)
- pdf.SetLink(linkCounter, 0, -1)
- linkCounter++
-}
-
-func createDisclaimer() {
- pdf.AddPage()
- currentChapterTitleBreadcrumb = "Disclaimer"
- defineLinkTarget("{disclaimer}")
- gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300)
- pdfColorDisclaimer()
- pdf.SetFont("Helvetica", "B", fontSizeHeadline)
- pdf.Text(11, 40, "Disclaimer")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetY(46)
-
- var disclaimer strings.Builder
- disclaimer.WriteString(model.ParsedModelRoot.Author.Name + " conducted this threat analysis using the open-source Threagile toolkit " +
- "on the applications and systems that were modeled as of this report's date. " +
- "Information security threats are continually changing, with new " +
- "vulnerabilities discovered on a daily basis, and no application can ever be 100% secure no matter how much " +
- "threat modeling is conducted. It is recommended to execute threat modeling and also penetration testing on a regular basis " +
- "(for example yearly) to ensure a high ongoing level of security and constantly check for new attack vectors. " +
- "
" +
- "This report cannot and does not protect against personal or business loss as the result of use of the " +
- "applications or systems described. " + model.ParsedModelRoot.Author.Name + " and the Threagile toolkit offers no warranties, representations or " +
- "legal certifications concerning the applications or systems it tests. All software includes defects: nothing " +
- "in this document is intended to represent or warrant that threat modeling was complete and without error, " +
- "nor does this document represent or warrant that the architecture analyzed is suitable to task, free of other " +
- "defects than reported, fully compliant with any industry standards, or fully compatible with any operating " +
- "system, hardware, or other application. Threat modeling tries to analyze the modeled architecture without " +
- "having access to a real working system and thus cannot and does not test the implementation for defects and vulnerabilities. " +
- "These kinds of checks would only be possible with a separate code review and penetration test against " +
- "a working system and not via a threat model." +
- "
" +
- "By using the resulting information you agree that " + model.ParsedModelRoot.Author.Name + " and the Threagile toolkit " +
- "shall be held harmless in any event." +
- "
" +
- "This report is confidential and intended for internal, confidential use by the client. The recipient " +
- "is obligated to ensure the highly confidential contents are kept secret. The recipient assumes responsibility " +
- "for further distribution of this document." +
- "
" +
- "In this particular project, a timebox approach was used to define the analysis effort. This means that the " +
- "author allotted a prearranged amount of time to identify and document threats. Because of this, there " +
- "is no guarantee that all possible threats and risks are discovered. Furthermore, the analysis " +
- "applies to a snapshot of the current state of the modeled architecture (based on the architecture information provided " +
- "by the customer) at the examination time." +
- "
" +
- "Report Distribution" +
- "
" +
- "Distribution of this report (in full or in part like diagrams or risk findings) requires that this disclaimer " +
- "as well as the chapter about the Threagile toolkit and method used is kept intact as part of the " +
- "distributed report or referenced from the distributed parts.")
- html := pdf.HTMLBasicNew()
- html.Write(5, disclaimer.String())
- pdfColorBlack()
-}
-
-func createManagementSummary() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- title := "Management Summary"
- addHeadline(title, false)
- defineLinkTarget("{management-summary}")
- currentChapterTitleBreadcrumb = title
- countCritical := len(model.FilteredByOnlyCriticalRisks())
- countHigh := len(model.FilteredByOnlyHighRisks())
- countElevated := len(model.FilteredByOnlyElevatedRisks())
- countMedium := len(model.FilteredByOnlyMediumRisks())
- countLow := len(model.FilteredByOnlyLowRisks())
-
- countStatusUnchecked := len(model.FilteredByRiskTrackingUnchecked())
- countStatusInDiscussion := len(model.FilteredByRiskTrackingInDiscussion())
- countStatusAccepted := len(model.FilteredByRiskTrackingAccepted())
- countStatusInProgress := len(model.FilteredByRiskTrackingInProgress())
- countStatusMitigated := len(model.FilteredByRiskTrackingMitigated())
- countStatusFalsePositive := len(model.FilteredByRiskTrackingFalsePositive())
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "Threagile toolkit was used to model the architecture of \""+uni(model.ParsedModelRoot.Title)+"\" "+
- "and derive risks by analyzing the components and data flows. The risks identified during this analysis are shown "+
- "in the following chapters. Identified risks during threat modeling do not necessarily mean that the "+
- "vulnerability associated with this risk actually exists: it is more to be seen as a list of potential risks and "+
- "threats, which should be individually reviewed and reduced by removing false positives. For the remaining risks it should "+
- "be checked in the design and implementation of \""+uni(model.ParsedModelRoot.Title)+"\" whether the mitigation advices "+
- "have been applied or not."+
- "
"+
- "Each risk finding references a chapter of the OWASP ASVS (Application Security Verification Standard) audit checklist. "+
- "The OWASP ASVS checklist should be considered as an inspiration by architects and developers to further harden "+
- "the application in a Defense-in-Depth approach. Additionally, for each risk finding a "+
- "link towards a matching OWASP Cheat Sheet or similar with technical details about how to implement a mitigation is given."+
- "
"+
- "In total "+strconv.Itoa(model.TotalRiskCount())+" initial risks in "+strconv.Itoa(len(model.GeneratedRisksByCategory))+" categories have "+
- "been identified during the threat modeling process:
") // TODO plural singular stuff risk/s category/ies has/have
-
- pdf.SetFont("Helvetica", "B", fontSizeBody)
-
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusUnchecked(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
-
- colors.ColorCriticalRisk(pdf)
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "critical risk", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusInDiscussion(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
-
- colors.ColorHighRisk(pdf)
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "high risk", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusAccepted(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
-
- colors.ColorElevatedRisk(pdf)
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "elevated risk", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusInProgress(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
-
- colors.ColorMediumRisk(pdf)
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "medium risk", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusMitigated(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "")
- pdf.SetFont("Helvetica", "BI", fontSizeBody)
- pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(-1)
-
- colors.ColorLowRisk(pdf)
- pdf.CellFormat(17, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "low risk", "0", 0, "", false, 0, "")
- colors.ColorRiskStatusFalsePositive(pdf)
- pdf.CellFormat(23, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "")
- pdf.SetFont("Helvetica", "BI", fontSizeBody)
- pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(-1)
-
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- // pie chart: risk severity
- pieChartRiskSeverity := chart.PieChart{
- Width: 1500,
- Height: 1500,
- Values: []chart.Value{
- {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorLowRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorLowRisk()),
- FontSize: 65}},
- {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorMediumRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorMediumRisk()),
- FontSize: 65}},
- {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorElevatedRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorElevatedRisk()),
- FontSize: 65}},
- {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorHighRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorHighRisk()),
- FontSize: 65}},
- {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorCriticalRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorCriticalRisk()),
- FontSize: 65}},
- },
- }
-
- // pie chart: risk status
- pieChartRiskStatus := chart.PieChart{
- Width: 1500,
- Height: 1500,
- Values: []chart.Value{
- {Value: float64(countStatusFalsePositive), //Label: strconv.Itoa(countStatusFalsePositive) + " False Positive",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()),
- FontSize: 65}},
- {Value: float64(countStatusMitigated), //Label: strconv.Itoa(countStatusMitigated) + " Mitigated",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusMitigated()),
- FontSize: 65}},
- {Value: float64(countStatusInProgress), //Label: strconv.Itoa(countStatusInProgress) + " InProgress",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusInProgress()),
- FontSize: 65}},
- {Value: float64(countStatusAccepted), //Label: strconv.Itoa(countStatusAccepted) + " Accepted",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusAccepted()),
- FontSize: 65}},
- {Value: float64(countStatusInDiscussion), //Label: strconv.Itoa(countStatusInDiscussion) + " InDiscussion",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()),
- FontSize: 65}},
- {Value: float64(countStatusUnchecked), //Label: strconv.Itoa(countStatusUnchecked) + " Unchecked",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()),
- FontSize: 65}},
- },
- }
-
- y := pdf.GetY() + 5
- embedPieChart(pieChartRiskSeverity, 15.0, y)
- embedPieChart(pieChartRiskStatus, 110.0, y)
-
- // individual management summary comment
- pdfColorBlack()
- if len(model.ParsedModelRoot.ManagementSummaryComment) > 0 {
- html.Write(5, "
"+
- model.ParsedModelRoot.ManagementSummaryComment)
- }
-}
-
-func createRiskMitigationStatus() {
- pdf.SetTextColor(0, 0, 0)
- stillAtRisk := model.FilteredByStillAtRisk()
- count := len(stillAtRisk)
- title := "Risk Mitigation"
- addHeadline(title, false)
- defineLinkTarget("{risk-mitigation-status}")
- currentChapterTitleBreadcrumb = title
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "The following chart gives a high-level overview of the risk tracking status (including mitigated risks):")
-
- risksCritical := model.FilteredByOnlyCriticalRisks()
- risksHigh := model.FilteredByOnlyHighRisks()
- risksElevated := model.FilteredByOnlyElevatedRisks()
- risksMedium := model.FilteredByOnlyMediumRisks()
- risksLow := model.FilteredByOnlyLowRisks()
-
- countStatusUnchecked := len(model.FilteredByRiskTrackingUnchecked())
- countStatusInDiscussion := len(model.FilteredByRiskTrackingInDiscussion())
- countStatusAccepted := len(model.FilteredByRiskTrackingAccepted())
- countStatusInProgress := len(model.FilteredByRiskTrackingInProgress())
- countStatusMitigated := len(model.FilteredByRiskTrackingMitigated())
- countStatusFalsePositive := len(model.FilteredByRiskTrackingFalsePositive())
-
- stackedBarChartRiskTracking := chart.StackedBarChart{
- Width: 4000,
- //Height: 2500,
- XAxis: chart.Style{Show: false, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom},
- YAxis: chart.Style{Show: true, FontSize: 26, TextVerticalAlign: chart.TextVerticalAlignBottom},
- Bars: []chart.StackedBar{
- {
- Name: model.LowSeverity.Title(),
- Width: 130,
- Values: []chart.Value{
- {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksLow))), Label: model.Unchecked.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksLow))), Label: model.InDiscussion.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksLow))), Label: model.Accepted.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksLow))), Label: model.InProgress.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksLow))), Label: model.Mitigated.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksLow))), Label: model.FalsePositive.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- },
- },
- {
- Name: model.MediumSeverity.Title(),
- Width: 130,
- Values: []chart.Value{
- {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksMedium))), Label: model.Unchecked.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksMedium))), Label: model.InDiscussion.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksMedium))), Label: model.Accepted.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksMedium))), Label: model.InProgress.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksMedium))), Label: model.Mitigated.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksMedium))), Label: model.FalsePositive.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- },
- },
- {
- Name: model.ElevatedSeverity.Title(),
- Width: 130,
- Values: []chart.Value{
- {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksElevated))), Label: model.Unchecked.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksElevated))), Label: model.InDiscussion.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksElevated))), Label: model.Accepted.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksElevated))), Label: model.InProgress.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksElevated))), Label: model.Mitigated.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksElevated))), Label: model.FalsePositive.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- },
- },
- {
- Name: model.HighSeverity.Title(),
- Width: 130,
- Values: []chart.Value{
- {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksHigh))), Label: model.Unchecked.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksHigh))), Label: model.InDiscussion.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksHigh))), Label: model.Accepted.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksHigh))), Label: model.InProgress.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksHigh))), Label: model.Mitigated.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksHigh))), Label: model.FalsePositive.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- },
- },
- {
- Name: model.CriticalSeverity.Title(),
- Width: 130,
- Values: []chart.Value{
- {Value: float64(len(model.ReduceToOnlyRiskTrackingUnchecked(risksCritical))), Label: model.Unchecked.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusUnchecked()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInDiscussion(risksCritical))), Label: model.InDiscussion.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInDiscussion()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingAccepted(risksCritical))), Label: model.Accepted.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusAccepted()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingInProgress(risksCritical))), Label: model.InProgress.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusInProgress()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingMitigated(risksCritical))), Label: model.Mitigated.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusMitigated()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- {Value: float64(len(model.ReduceToOnlyRiskTrackingFalsePositive(risksCritical))), Label: model.FalsePositive.Title(),
- Style: chart.Style{FillColor: makeColor(colors.RgbHexColorRiskStatusFalsePositive()).WithAlpha(98), StrokeColor: drawing.ColorFromHex("999")}},
- },
- },
- },
- }
-
- y := pdf.GetY() + 12
- embedStackedBarChart(stackedBarChartRiskTracking, 15.0, y)
-
- // draw the X-Axis legend on my own
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorBlack()
- pdf.Text(24.02, 169, "Low ("+strconv.Itoa(len(risksLow))+")")
- pdf.Text(46.10, 169, "Medium ("+strconv.Itoa(len(risksMedium))+")")
- pdf.Text(69.74, 169, "Elevated ("+strconv.Itoa(len(risksElevated))+")")
- pdf.Text(97.95, 169, "High ("+strconv.Itoa(len(risksHigh))+")")
- pdf.Text(121.65, 169, "Critical ("+strconv.Itoa(len(risksCritical))+")")
-
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(20)
-
- colors.ColorRiskStatusUnchecked(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusUnchecked), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unchecked", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorRiskStatusInDiscussion(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusInDiscussion), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "in discussion", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorRiskStatusAccepted(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusAccepted), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "accepted", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorRiskStatusInProgress(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusInProgress), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "in progress", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorRiskStatusMitigated(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusMitigated), "0", 0, "R", false, 0, "")
- pdf.SetFont("Helvetica", "BI", fontSizeBody)
- pdf.CellFormat(60, 6, "mitigated", "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(-1)
- colors.ColorRiskStatusFalsePositive(pdf)
- pdf.CellFormat(150, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countStatusFalsePositive), "0", 0, "R", false, 0, "")
- pdf.SetFont("Helvetica", "BI", fontSizeBody)
- pdf.CellFormat(60, 6, "false positive", "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(-1)
-
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- pdfColorBlack()
- if count == 0 {
- html.Write(5, "<br><br>"+
- "After removal of risks with status <i>mitigated</i> and <i>false positive</i> "+
- "<b>"+strconv.Itoa(count)+"</b> remain unmitigated.")
- } else {
- html.Write(5, "<br><br>"+
- "After removal of risks with status <i>mitigated</i> and <i>false positive</i> "+
- "the following <b>"+strconv.Itoa(count)+"</b> remain unmitigated:")
-
- countCritical := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyCriticalRisks()))
- countHigh := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyHighRisks()))
- countElevated := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyElevatedRisks()))
- countMedium := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyMediumRisks()))
- countLow := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyLowRisks()))
-
- countBusinessSide := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyBusinessSide()))
- countArchitecture := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyArchitecture()))
- countDevelopment := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyDevelopment()))
- countOperation := len(model.ReduceToOnlyStillAtRisk(model.FilteredByOnlyOperation()))
-
- pieChartRemainingRiskSeverity := chart.PieChart{
- Width: 1500,
- Height: 1500,
- Values: []chart.Value{
- {Value: float64(countLow), //Label: strconv.Itoa(countLow) + " Low",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorLowRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorLowRisk()),
- FontSize: 65}},
- {Value: float64(countMedium), //Label: strconv.Itoa(countMedium) + " Medium",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorMediumRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorMediumRisk()),
- FontSize: 65}},
- {Value: float64(countElevated), //Label: strconv.Itoa(countElevated) + " Elevated",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorElevatedRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorElevatedRisk()),
- FontSize: 65}},
- {Value: float64(countHigh), //Label: strconv.Itoa(countHigh) + " High",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorHighRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorHighRisk()),
- FontSize: 65}},
- {Value: float64(countCritical), //Label: strconv.Itoa(countCritical) + " Critical",
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorCriticalRisk()).WithAlpha(98),
- //FontColor: makeColor(colors.RgbHexColorCriticalRisk()),
- FontSize: 65}},
- },
- }
-
- pieChartRemainingRisksByFunction := chart.PieChart{
- Width: 1500,
- Height: 1500,
- Values: []chart.Value{
- {Value: float64(countBusinessSide),
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorBusiness()).WithAlpha(98),
- FontSize: 65}},
- {Value: float64(countArchitecture),
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorArchitecture()).WithAlpha(98),
- FontSize: 65}},
- {Value: float64(countDevelopment),
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorDevelopment()).WithAlpha(98),
- FontSize: 65}},
- {Value: float64(countOperation),
- Style: chart.Style{
- FillColor: makeColor(colors.RgbHexColorOperation()).WithAlpha(98),
- FontSize: 65}},
- },
- }
-
- embedPieChart(pieChartRemainingRiskSeverity, 15.0, 216)
- embedPieChart(pieChartRemainingRisksByFunction, 110.0, 216)
-
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.Ln(8)
-
- colors.ColorCriticalRisk(pdf)
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countCritical), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unmitigated critical risk", "0", 0, "", false, 0, "")
- pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, "", "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorHighRisk(pdf)
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countHigh), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unmitigated high risk", "0", 0, "", false, 0, "")
- colors.ColorBusiness(pdf)
- pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countBusinessSide), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "business side related", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorElevatedRisk(pdf)
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countElevated), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unmitigated elevated risk", "0", 0, "", false, 0, "")
- colors.ColorArchitecture(pdf)
- pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countArchitecture), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "architecture related", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorMediumRisk(pdf)
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countMedium), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unmitigated medium risk", "0", 0, "", false, 0, "")
- colors.ColorDevelopment(pdf)
- pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countDevelopment), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "development related", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- colors.ColorLowRisk(pdf)
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countLow), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "unmitigated low risk", "0", 0, "", false, 0, "")
- colors.ColorOperation(pdf)
- pdf.CellFormat(22, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(10, 6, strconv.Itoa(countOperation), "0", 0, "R", false, 0, "")
- pdf.CellFormat(60, 6, "operations related", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- }
-}
-
-// CAUTION: Long labels might cause endless loop, then remove labels and render them manually later inside the PDF
-func embedStackedBarChart(sbcChart chart.StackedBarChart, x float64, y float64) {
- tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "chart-*-.png")
- checkErr(err)
- defer os.Remove(tmpFilePNG.Name())
- file, _ := os.Create(tmpFilePNG.Name())
- defer file.Close()
- err = sbcChart.Render(chart.PNG, file)
- checkErr(err)
- var options gofpdf.ImageOptions
- options.ImageType = ""
- pdf.RegisterImage(tmpFilePNG.Name(), "")
- pdf.ImageOptions(tmpFilePNG.Name(), x, y, 0, 110, false, options, 0, "")
-}
-
-func embedPieChart(pieChart chart.PieChart, x float64, y float64) {
- tmpFilePNG, err := ioutil.TempFile(model.TempFolder, "chart-*-.png")
- checkErr(err)
- defer os.Remove(tmpFilePNG.Name())
- file, err := os.Create(tmpFilePNG.Name())
- checkErr(err)
- defer file.Close()
- err = pieChart.Render(chart.PNG, file)
- checkErr(err)
- var options gofpdf.ImageOptions
- options.ImageType = ""
- pdf.RegisterImage(tmpFilePNG.Name(), "")
- pdf.ImageOptions(tmpFilePNG.Name(), x, y, 60, 0, false, options, 0, "")
-}
-
-func makeColor(hexColor string) drawing.Color {
- _, i := utf8.DecodeRuneInString(hexColor)
- return drawing.ColorFromHex(hexColor[i:]) // = remove first char, which is # in rgb hex here
-}
-
-func createImpactInitialRisks() {
- renderImpactAnalysis(true)
-}
-
-func createImpactRemainingRisks() {
- renderImpactAnalysis(false)
-}
-
-func renderImpactAnalysis(initialRisks bool) {
- pdf.SetTextColor(0, 0, 0)
- count, catCount := model.TotalRiskCount(), len(model.GeneratedRisksByCategory)
- if !initialRisks {
- count, catCount = len(model.FilteredByStillAtRisk()), len(model.CategoriesOfOnlyRisksStillAtRisk(model.GeneratedRisksByCategory))
- }
- riskStr, catStr := "Risks", "Categories"
- if count == 1 {
- riskStr = "Risk"
- }
- if catCount == 1 {
- catStr = "Category"
- }
- if initialRisks {
- chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Initial " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr
- addHeadline(chapTitle, false)
- defineLinkTarget("{impact-analysis-initial-risks}")
- currentChapterTitleBreadcrumb = chapTitle
- } else {
- chapTitle := "Impact Analysis of " + strconv.Itoa(count) + " Remaining " + riskStr + " in " + strconv.Itoa(catCount) + " " + catStr
- addHeadline(chapTitle, false)
- defineLinkTarget("{impact-analysis-remaining-risks}")
- currentChapterTitleBreadcrumb = chapTitle
- }
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- riskStr = "risks"
- if count == 1 {
- riskStr = "risk"
- }
- initialStr := "initial"
- if !initialRisks {
- initialStr = "remaining"
- }
- strBuilder.WriteString("The most prevalent impacts of the " + strconv.Itoa(count) + " " +
- initialStr + " " + riskStr + " (distributed over " + strconv.Itoa(catCount) + " risk categories) are " +
- "(taking the severity ratings into account and using the highest for each category):<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- addCategories(model.CategoriesOfOnlyCriticalRisks(model.GeneratedRisksByCategory, initialRisks),
- model.CriticalSeverity, false, initialRisks, true, false)
- addCategories(model.CategoriesOfOnlyHighRisks(model.GeneratedRisksByCategory, initialRisks),
- model.HighSeverity, false, initialRisks, true, false)
- addCategories(model.CategoriesOfOnlyElevatedRisks(model.GeneratedRisksByCategory, initialRisks),
- model.ElevatedSeverity, false, initialRisks, true, false)
- addCategories(model.CategoriesOfOnlyMediumRisks(model.GeneratedRisksByCategory, initialRisks),
- model.MediumSeverity, false, initialRisks, true, false)
- addCategories(model.CategoriesOfOnlyLowRisks(model.GeneratedRisksByCategory, initialRisks),
- model.LowSeverity, false, initialRisks, true, false)
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-func createOutOfScopeAssets() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- assets := "Assets"
- count := len(model.OutOfScopeTechnicalAssets())
- if count == 1 {
- assets = "Asset"
- }
- chapTitle := "Out-of-Scope Assets: " + strconv.Itoa(count) + " " + assets
- addHeadline(chapTitle, false)
- defineLinkTarget("{out-of-scope-assets}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- strBuilder.WriteString("This chapter lists all technical assets that have been defined as out-of-scope. " +
- "Each one should be checked in the model whether it should better be included in the " +
- "overall risk analysis:<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- outOfScopeAssetCount := 0
- for _, technicalAsset := range model.SortedTechnicalAssetsByRAAAndTitle() {
- if technicalAsset.OutOfScope {
- outOfScopeAssetCount++
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- strBuilder.WriteString("<br><br>")
- }
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- posY := pdf.GetY()
- pdfColorOutOfScope()
- strBuilder.WriteString("<b>")
- strBuilder.WriteString(uni(technicalAsset.Title))
- strBuilder.WriteString("</b>")
- strBuilder.WriteString(": out-of-scope")
- strBuilder.WriteString("<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetTextColor(0, 0, 0)
- strBuilder.WriteString(uni(technicalAsset.JustificationOutOfScope))
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id])
- }
- }
-
- if outOfScopeAssetCount == 0 {
- pdfColorGray()
- html.Write(5, "<br><br>No technical assets have been defined as out-of-scope.")
- }
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-func createModelFailures() {
- pdf.SetTextColor(0, 0, 0)
- modelFailures := model.FlattenRiskSlice(model.FilterByModelFailures(model.GeneratedRisksByCategory))
- risks := "Risks"
- count := len(modelFailures)
- if count == 1 {
- risks = "Risk"
- }
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(modelFailures))
- if countStillAtRisk > 0 {
- colors.ColorModelFailure(pdf)
- }
- chapTitle := "Potential Model Failures: " + strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(count) + " " + risks
- addHeadline(chapTitle, false)
- defineLinkTarget("{model-failures}")
- currentChapterTitleBreadcrumb = chapTitle
- pdfColorBlack()
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- strBuilder.WriteString("This chapter lists potential model failures where not all relevant assets have been " +
- "modeled or the model might itself contain inconsistencies. Each potential model failure should be checked " +
- "in the model against the architecture design:<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- modelFailuresByCategory := model.FilterByModelFailures(model.GeneratedRisksByCategory)
- if len(modelFailuresByCategory) == 0 {
- pdfColorGray()
- html.Write(5, "<br><br>No potential model failures have been identified.")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(modelFailuresByCategory, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(modelFailuresByCategory, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(modelFailuresByCategory, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(modelFailuresByCategory, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(modelFailuresByCategory, true),
- model.LowSeverity, true, true, false, true)
- }
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-func createRAA(introTextRAA string) {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- chapTitle := "RAA Analysis"
- addHeadline(chapTitle, false)
- defineLinkTarget("{raa-analysis}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- strBuilder.WriteString(introTextRAA)
- strBuilder.WriteString("<br><br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- for _, technicalAsset := range model.SortedTechnicalAssetsByRAAAndTitle() {
- if technicalAsset.OutOfScope {
- continue
- }
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- strBuilder.WriteString("<br><br>")
- }
- risks := technicalAsset.GeneratedRisks()
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
-
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- posY := pdf.GetY()
- strBuilder.WriteString("<b>")
- strBuilder.WriteString(uni(technicalAsset.Title))
- strBuilder.WriteString("</b>")
- if technicalAsset.OutOfScope {
- strBuilder.WriteString(": out-of-scope")
- } else {
- strBuilder.WriteString(": RAA ")
- strBuilder.WriteString(fmt.Sprintf("%.0f", technicalAsset.RAA))
- strBuilder.WriteString("%")
- }
- strBuilder.WriteString("<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetTextColor(0, 0, 0)
- strBuilder.WriteString(uni(technicalAsset.Description))
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id])
- }
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-/*
-func createDataRiskQuickWins() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- assets := "assets"
- count := len(model.SortedTechnicalAssetsByQuickWinsAndTitle())
- if count == 1 {
- assets = "asset"
- }
- chapTitle := "Data Risk Quick Wins: " + strconv.Itoa(count) + " " + assets
- addHeadline(chapTitle, false)
- defineLinkTarget("{data-risk-quick-wins}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- strBuilder.WriteString("For each technical asset it was checked how many data assets at risk might " +
- "get their risk-rating reduced (partly or fully) when the risks of the technical asset are mitigated. " +
- "In general, that means the higher the quick win value is, the more data assets (left side of the Data Risk Mapping diagram) " +
- "turn from red to amber or from amber to blue by mitigating the technical asset's risks. " +
- "This list can be used to prioritize on efforts with the greatest effects of reducing data asset risks:<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Technical asset paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- for _, technicalAsset := range model.SortedTechnicalAssetsByQuickWinsAndTitle() {
- quickWins := technicalAsset.QuickWins()
- if pdf.GetY() > 260 {
- pageBreak()
- pdf.SetY(36)
- } else {
- strBuilder.WriteString("<br><br>")
- }
- risks := technicalAsset.GeneratedRisks()
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.High:
- colors.ColorHighRisk(pdf)
- case model.Medium:
- colors.ColorMediumRisk(pdf)
- case model.Low:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
-
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- posY := pdf.GetY()
- strBuilder.WriteString("<b>")
- strBuilder.WriteString(uni(technicalAsset.Title))
- strBuilder.WriteString("</b>")
- strBuilder.WriteString(": ")
- strBuilder.WriteString(fmt.Sprintf("%.2f", quickWins))
- strBuilder.WriteString(" Quick Wins")
- strBuilder.WriteString("<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetTextColor(0, 0, 0)
- strBuilder.WriteString(uni(technicalAsset.Description))
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[technicalAsset.Id])
- }
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-*/
-
-func addCategories(riskCategories []model.RiskCategory, severity model.RiskSeverity, bothInitialAndRemainingRisks bool, initialRisks bool, describeImpact bool, describeDescription bool) {
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- sort.Sort(model.ByRiskCategoryTitleSort(riskCategories))
- for _, riskCategory := range riskCategories {
- risks := model.GeneratedRisksByCategory[riskCategory]
- if !initialRisks {
- risks = model.ReduceToOnlyStillAtRisk(risks)
- }
- if len(risks) == 0 {
- continue
- }
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- strBuilder.WriteString("<br><br>")
- }
- var prefix string
- switch severity {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- prefix = "Critical: "
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- prefix = "High: "
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- prefix = "Elevated: "
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- prefix = "Medium: "
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- prefix = "Low: "
- default:
- pdfColorBlack()
- prefix = ""
- }
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- posY := pdf.GetY()
- strBuilder.WriteString(prefix)
- strBuilder.WriteString("<b>")
- strBuilder.WriteString(riskCategory.Title)
- strBuilder.WriteString("</b>: ")
- count := len(risks)
- initialStr := "Initial"
- if !initialRisks {
- initialStr = "Remaining"
- }
- remainingRisks := model.ReduceToOnlyStillAtRisk(risks)
- suffix := strconv.Itoa(count) + " " + initialStr + " Risk"
- if bothInitialAndRemainingRisks {
- suffix = strconv.Itoa(len(remainingRisks)) + " / " + strconv.Itoa(count) + " Risk"
- }
- if count != 1 {
- suffix += "s"
- }
- suffix += " - Exploitation likelihood is "
- if initialRisks {
- suffix += model.HighestExploitationLikelihood(risks).Title() + " with " + model.HighestExploitationImpact(risks).Title() + " impact."
- } else {
- suffix += model.HighestExploitationLikelihood(remainingRisks).Title() + " with " + model.HighestExploitationImpact(remainingRisks).Title() + " impact."
- }
- strBuilder.WriteString(suffix + "<br>")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.SetTextColor(0, 0, 0)
- if describeImpact {
- strBuilder.WriteString(firstParagraph(riskCategory.Impact))
- } else if describeDescription {
- strBuilder.WriteString(firstParagraph(riskCategory.Description))
- } else {
- strBuilder.WriteString(firstParagraph(riskCategory.Mitigation))
- }
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdf.Link(9, posY, 190, pdf.GetY()-posY+4, tocLinkIdByAssetId[riskCategory.Id])
- }
-}
-
-func firstParagraph(text string) string {
- match := firstParagraphRegEx.FindStringSubmatch(text)
- if len(match) == 0 {
- return text
- }
- return match[1]
-}
-
-func createAssignmentByFunction() {
- pdf.SetTextColor(0, 0, 0)
- title := "Assignment by Function"
- addHeadline(title, false)
- defineLinkTarget("{function-assignment}")
- currentChapterTitleBreadcrumb = title
-
- risksBusinessSideFunction := model.RisksOfOnlyBusinessSide(model.GeneratedRisksByCategory)
- risksArchitectureFunction := model.RisksOfOnlyArchitecture(model.GeneratedRisksByCategory)
- risksDevelopmentFunction := model.RisksOfOnlyDevelopment(model.GeneratedRisksByCategory)
- risksOperationFunction := model.RisksOfOnlyOperation(model.GeneratedRisksByCategory)
-
- countBusinessSideFunction := model.CountRisks(risksBusinessSideFunction)
- countArchitectureFunction := model.CountRisks(risksArchitectureFunction)
- countDevelopmentFunction := model.CountRisks(risksDevelopmentFunction)
- countOperationFunction := model.CountRisks(risksOperationFunction)
- var intro strings.Builder
- intro.WriteString("This chapter clusters and assigns the risks by functions which are most likely able to " +
- "check and mitigate them: " +
- "In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " +
- "of which " + strconv.Itoa(countBusinessSideFunction) + " should be checked by " + model.BusinessSide.Title() + ", " +
- "" + strconv.Itoa(countArchitectureFunction) + " should be checked by " + model.Architecture.Title() + ", " +
- "" + strconv.Itoa(countDevelopmentFunction) + " should be checked by " + model.Development.Title() + ", " +
- "and " + strconv.Itoa(countOperationFunction) + " should be checked by " + model.Operations.Title() + ".<br>")
- html := pdf.HTMLBasicNew()
- html.Write(5, intro.String())
- intro.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- oldLeft, _, _, _ := pdf.GetMargins()
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "<br><br>")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, "<b>"+model.BusinessSide.Title()+"</b>")
- pdf.SetLeftMargin(15)
- if len(risksBusinessSideFunction) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "<br><br>n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksBusinessSideFunction, true),
- model.CriticalSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyHighRisks(risksBusinessSideFunction, true),
- model.HighSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksBusinessSideFunction, true),
- model.ElevatedSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksBusinessSideFunction, true),
- model.MediumSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyLowRisks(risksBusinessSideFunction, true),
- model.LowSeverity, true, true, false, false)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "<br><br>")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, "<b>"+model.Architecture.Title()+"</b>")
- pdf.SetLeftMargin(15)
- if len(risksArchitectureFunction) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "<br><br>n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksArchitectureFunction, true),
- model.CriticalSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyHighRisks(risksArchitectureFunction, true),
- model.HighSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksArchitectureFunction, true),
- model.ElevatedSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksArchitectureFunction, true),
- model.MediumSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyLowRisks(risksArchitectureFunction, true),
- model.LowSeverity, true, true, false, false)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "<br><br>")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, "<b>"+model.Development.Title()+"</b>")
- pdf.SetLeftMargin(15)
- if len(risksDevelopmentFunction) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "<br><br>n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksDevelopmentFunction, true),
- model.CriticalSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyHighRisks(risksDevelopmentFunction, true),
- model.HighSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksDevelopmentFunction, true),
- model.ElevatedSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksDevelopmentFunction, true),
- model.MediumSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyLowRisks(risksDevelopmentFunction, true),
- model.LowSeverity, true, true, false, false)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "<br><br>")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, "<b>"+model.Operations.Title()+"</b>")
- pdf.SetLeftMargin(15)
- if len(risksOperationFunction) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "<br><br>n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksOperationFunction, true),
- model.CriticalSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyHighRisks(risksOperationFunction, true),
- model.HighSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksOperationFunction, true),
- model.ElevatedSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksOperationFunction, true),
- model.MediumSeverity, true, true, false, false)
- addCategories(model.CategoriesOfOnlyLowRisks(risksOperationFunction, true),
- model.LowSeverity, true, true, false, false)
- }
- pdf.SetLeftMargin(oldLeft)
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-func createSTRIDE() {
- pdf.SetTextColor(0, 0, 0)
- title := "STRIDE Classification of Identified Risks"
- addHeadline(title, false)
- defineLinkTarget("{stride}")
- currentChapterTitleBreadcrumb = title
-
- risksSTRIDESpoofing := model.RisksOfOnlySTRIDESpoofing(model.GeneratedRisksByCategory)
- risksSTRIDETampering := model.RisksOfOnlySTRIDETampering(model.GeneratedRisksByCategory)
- risksSTRIDERepudiation := model.RisksOfOnlySTRIDERepudiation(model.GeneratedRisksByCategory)
- risksSTRIDEInformationDisclosure := model.RisksOfOnlySTRIDEInformationDisclosure(model.GeneratedRisksByCategory)
- risksSTRIDEDenialOfService := model.RisksOfOnlySTRIDEDenialOfService(model.GeneratedRisksByCategory)
- risksSTRIDEElevationOfPrivilege := model.RisksOfOnlySTRIDEElevationOfPrivilege(model.GeneratedRisksByCategory)
-
- countSTRIDESpoofing := model.CountRisks(risksSTRIDESpoofing)
- countSTRIDETampering := model.CountRisks(risksSTRIDETampering)
- countSTRIDERepudiation := model.CountRisks(risksSTRIDERepudiation)
- countSTRIDEInformationDisclosure := model.CountRisks(risksSTRIDEInformationDisclosure)
- countSTRIDEDenialOfService := model.CountRisks(risksSTRIDEDenialOfService)
- countSTRIDEElevationOfPrivilege := model.CountRisks(risksSTRIDEElevationOfPrivilege)
- var intro strings.Builder
- intro.WriteString("This chapter clusters and classifies the risks by STRIDE categories: " +
- "In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " +
- "of which " + strconv.Itoa(countSTRIDESpoofing) + " in the " + model.Spoofing.Title() + " category, " +
- "" + strconv.Itoa(countSTRIDETampering) + " in the " + model.Tampering.Title() + " category, " +
- "" + strconv.Itoa(countSTRIDERepudiation) + " in the " + model.Repudiation.Title() + " category, " +
- "" + strconv.Itoa(countSTRIDEInformationDisclosure) + " in the " + model.InformationDisclosure.Title() + " category, " +
- "" + strconv.Itoa(countSTRIDEDenialOfService) + " in the " + model.DenialOfService.Title() + " category, " +
- "and " + strconv.Itoa(countSTRIDEElevationOfPrivilege) + " in the " + model.ElevationOfPrivilege.Title() + " category.
")
- html := pdf.HTMLBasicNew()
- html.Write(5, intro.String())
- intro.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- oldLeft, _, _, _ := pdf.GetMargins()
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.Spoofing.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDESpoofing) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDESpoofing, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDESpoofing, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDESpoofing, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDESpoofing, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDESpoofing, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.Tampering.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDETampering) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDETampering, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDETampering, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDETampering, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDETampering, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDETampering, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.Repudiation.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDERepudiation) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDERepudiation, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDERepudiation, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDERepudiation, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDERepudiation, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDERepudiation, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.InformationDisclosure.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDEInformationDisclosure) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEInformationDisclosure, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEInformationDisclosure, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEInformationDisclosure, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEInformationDisclosure, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEInformationDisclosure, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.DenialOfService.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDEDenialOfService) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEDenialOfService, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEDenialOfService, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEDenialOfService, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEDenialOfService, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEDenialOfService, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetTextColor(0, 0, 0)
- html.Write(5, ""+model.ElevationOfPrivilege.Title()+"")
- pdf.SetLeftMargin(15)
- if len(risksSTRIDEElevationOfPrivilege) == 0 {
- pdf.SetTextColor(150, 150, 150)
- html.Write(5, "
n/a")
- } else {
- addCategories(model.CategoriesOfOnlyCriticalRisks(risksSTRIDEElevationOfPrivilege, true),
- model.CriticalSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyHighRisks(risksSTRIDEElevationOfPrivilege, true),
- model.HighSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyElevatedRisks(risksSTRIDEElevationOfPrivilege, true),
- model.ElevatedSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyMediumRisks(risksSTRIDEElevationOfPrivilege, true),
- model.MediumSeverity, true, true, false, true)
- addCategories(model.CategoriesOfOnlyLowRisks(risksSTRIDEElevationOfPrivilege, true),
- model.LowSeverity, true, true, false, true)
- }
- pdf.SetLeftMargin(oldLeft)
-
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
-}
-
-func createSecurityRequirements() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- chapTitle := "Security Requirements"
- addHeadline(chapTitle, false)
- defineLinkTarget("{security-requirements}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "This chapter lists the custom security requirements which have been defined for the modeled target.")
- pdfColorBlack()
- for _, title := range model.SortedKeysOfSecurityRequirements() {
- description := model.ParsedModelRoot.SecurityRequirements[title]
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- html.Write(5, ""+uni(title)+"
")
- html.Write(5, uni(description))
- }
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- html.Write(5, "This list is not complete and regulatory or law relevant security requirements have to be "+
- "taken into account as well. Also custom individual security requirements might exist for the project.")
-}
-
-func createAbuseCases() {
- pdf.SetTextColor(0, 0, 0)
- chapTitle := "Abuse Cases"
- addHeadline(chapTitle, false)
- defineLinkTarget("{abuse-cases}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "This chapter lists the custom abuse cases which have been defined for the modeled target.")
- pdfColorBlack()
- for _, title := range model.SortedKeysOfAbuseCases() {
- description := model.ParsedModelRoot.AbuseCases[title]
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- html.Write(5, ""+title+"
")
- html.Write(5, description)
- }
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- html.Write(5, "This list is not complete and regulatory or law relevant abuse cases have to be "+
- "taken into account as well. Also custom individual abuse cases might exist for the project.")
-}
-
-func createQuestions() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- questions := "Questions"
- count := len(model.ParsedModelRoot.Questions)
- if count == 1 {
- questions = "Question"
- }
- if model.QuestionsUnanswered() > 0 {
- colors.ColorModelFailure(pdf)
- }
- chapTitle := "Questions: " + strconv.Itoa(model.QuestionsUnanswered()) + " / " + strconv.Itoa(count) + " " + questions
- addHeadline(chapTitle, false)
- defineLinkTarget("{questions}")
- currentChapterTitleBreadcrumb = chapTitle
- pdfColorBlack()
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "This chapter lists custom questions that arose during the threat modeling process.")
-
- if len(model.ParsedModelRoot.Questions) == 0 {
- pdfColorLightGray()
- html.Write(5, "
")
- html.Write(5, "No custom questions arose during the threat modeling process.")
- }
- pdfColorBlack()
- for _, question := range model.SortedKeysOfQuestions() {
- answer := model.ParsedModelRoot.Questions[question]
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdfColorBlack()
- if len(strings.TrimSpace(answer)) > 0 {
- html.Write(5, ""+uni(question)+"
")
- html.Write(5, ""+uni(strings.TrimSpace(answer))+"")
- } else {
- colors.ColorModelFailure(pdf)
- html.Write(5, ""+uni(question)+"
")
- pdfColorLightGray()
- html.Write(5, "- answer pending -")
- pdfColorBlack()
- }
- }
-}
-
-func createTagListing() {
- pdf.SetTextColor(0, 0, 0)
- chapTitle := "Tag Listing"
- addHeadline(chapTitle, false)
- defineLinkTarget("{tag-listing}")
- currentChapterTitleBreadcrumb = chapTitle
-
- html := pdf.HTMLBasicNew()
- html.Write(5, "This chapter lists what tags are used by which elements.")
- pdfColorBlack()
- sorted := model.ParsedModelRoot.TagsAvailable
- sort.Strings(sorted)
- for _, tag := range sorted {
- description := "" // TODO: add some separation texts to distinguish between technical assets and data assets etc. for example?
- for _, techAsset := range model.SortedTechnicalAssetsByTitle() {
- if model.Contains(techAsset.Tags, tag) {
- if len(description) > 0 {
- description += ", "
- }
- description += techAsset.Title
- }
- for _, commLink := range techAsset.CommunicationLinksSorted() {
- if model.Contains(commLink.Tags, tag) {
- if len(description) > 0 {
- description += ", "
- }
- description += commLink.Title
- }
- }
- }
- for _, dataAsset := range model.SortedDataAssetsByTitle() {
- if model.Contains(dataAsset.Tags, tag) {
- if len(description) > 0 {
- description += ", "
- }
- description += dataAsset.Title
- }
- }
- for _, trustBoundary := range model.SortedTrustBoundariesByTitle() {
- if model.Contains(trustBoundary.Tags, tag) {
- if len(description) > 0 {
- description += ", "
- }
- description += trustBoundary.Title
- }
- }
- for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() {
- if model.Contains(sharedRuntime.Tags, tag) {
- if len(description) > 0 {
- description += ", "
- }
- description += sharedRuntime.Title
- }
- }
- if len(description) > 0 {
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdfColorBlack()
- html.Write(5, ""+tag+"
")
- html.Write(5, description)
- }
- }
-}
-
-func createRiskCategories() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- // category title
- title := "Identified Risks by Vulnerability Category"
- pdfColorBlack()
- addHeadline(title, false)
- defineLinkTarget("{intro-risks-by-vulnerability-category}")
- html := pdf.HTMLBasicNew()
- var text strings.Builder
- text.WriteString("In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " +
- "of which " +
- "" + strconv.Itoa(len(model.FilteredByOnlyCriticalRisks())) + " are rated as critical, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyHighRisks())) + " as high, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyElevatedRisks())) + " as elevated, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyMediumRisks())) + " as medium, " +
- "and " + strconv.Itoa(len(model.FilteredByOnlyLowRisks())) + " as low. " +
- "
These risks are distributed across " + strconv.Itoa(len(model.GeneratedRisksByCategory)) + " vulnerability categories. ")
- text.WriteString("The following sub-chapters of this section describe each identified risk category.") // TODO more explanation text
- html.Write(5, text.String())
- text.Reset()
- currentChapterTitleBreadcrumb = title
- for _, category := range model.SortedRiskCategories() {
- risks := model.SortedRisksOfCategory(category)
-
- // category color
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
-
- // category title
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- title := category.Title + ": " + suffix
- addHeadline(uni(title), true)
- pdfColorBlack()
- defineLinkTarget("{" + category.Id + "}")
- currentChapterTitleBreadcrumb = title
-
- // category details
- var text strings.Builder
- cweLink := "n/a"
- if category.CWE > 0 {
- cweLink = "CWE " +
- strconv.Itoa(category.CWE) + ""
- }
- text.WriteString("Description (" + category.STRIDE.Title() + "): " + cweLink + "
")
- text.WriteString(category.Description)
- text.WriteString("
Impact
")
- text.WriteString(category.Impact)
- text.WriteString("
Detection Logic
")
- text.WriteString(category.DetectionLogic)
- text.WriteString("
Risk Rating
")
- text.WriteString(category.RiskAssessment)
- html.Write(5, text.String())
- text.Reset()
- colors.ColorRiskStatusFalsePositive(pdf)
- text.WriteString("
False Positives
")
- text.WriteString(category.FalsePositives)
- html.Write(5, text.String())
- text.Reset()
- colors.ColorRiskStatusMitigated(pdf)
- text.WriteString("
Mitigation (" + category.Function.Title() + "): " + category.Action + "
")
- text.WriteString(category.Mitigation)
-
- asvsChapter := category.ASVS
- if len(asvsChapter) == 0 {
- text.WriteString("
ASVS Chapter: n/a")
- } else {
- text.WriteString("
ASVS Chapter: " + asvsChapter + "")
- }
-
- cheatSheetLink := category.CheatSheet
- if len(cheatSheetLink) == 0 {
- cheatSheetLink = "n/a"
- } else {
- lastLinkParts := strings.Split(cheatSheetLink, "/")
- linkText := lastLinkParts[len(lastLinkParts)-1]
- if strings.HasSuffix(linkText, ".html") || strings.HasSuffix(linkText, ".htm") {
- var extension = filepath.Ext(linkText)
- linkText = linkText[0 : len(linkText)-len(extension)]
- }
- cheatSheetLink = "" + linkText + ""
- }
- text.WriteString("
Cheat Sheet: " + cheatSheetLink)
-
- text.WriteString("
Check
")
- text.WriteString(category.Check)
-
- html.Write(5, text.String())
- text.Reset()
- pdf.SetTextColor(0, 0, 0)
-
- // risk details
- pageBreak()
- pdf.SetY(36)
- text.WriteString("Risk Findings
")
- times := strconv.Itoa(len(risks)) + " time"
- if len(risks) > 1 {
- times += "s"
- }
- text.WriteString("The risk " + category.Title + " was found " + times + " in the analyzed architecture to be " +
- "potentially possible. Each spot should be checked individually by reviewing the implementation whether all " +
- "controls have been applied properly in order to mitigate each risk.
")
- html.Write(5, text.String())
- text.Reset()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.
")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- oldLeft, _, _, _ := pdf.GetMargins()
- headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false
- for _, risk := range risks {
- text.WriteString("
")
- html.Write(5, text.String())
- text.Reset()
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- }
- switch risk.Severity {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- if !headlineCriticalWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft)
- text.WriteString("
Critical Risk Severity
")
- html.Write(5, text.String())
- text.Reset()
- headlineCriticalWritten = true
- }
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- if !headlineHighWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft)
- text.WriteString("
High Risk Severity
")
- html.Write(5, text.String())
- text.Reset()
- headlineHighWritten = true
- }
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- if !headlineElevatedWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft)
- text.WriteString("
Elevated Risk Severity
")
- html.Write(5, text.String())
- text.Reset()
- headlineElevatedWritten = true
- }
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- if !headlineMediumWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft)
- text.WriteString("
Medium Risk Severity
")
- html.Write(5, text.String())
- text.Reset()
- headlineMediumWritten = true
- }
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- if !headlineLowWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft)
- text.WriteString("
Low Risk Severity
")
- html.Write(5, text.String())
- text.Reset()
- headlineLowWritten = true
- }
- default:
- pdfColorBlack()
- }
- if !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() {
- pdfColorBlack()
- }
- posY := pdf.GetY()
- pdf.SetLeftMargin(oldLeft + 10)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.")
- text.WriteString("
")
- html.Write(5, text.String())
- text.Reset()
- pdfColorGray()
- pdf.SetFont("Helvetica", "", fontSizeVerySmall)
- pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- if len(risk.MostRelevantSharedRuntimeId) > 0 {
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantSharedRuntimeId])
- } else if len(risk.MostRelevantTrustBoundaryId) > 0 {
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantTrustBoundaryId])
- } else if len(risk.MostRelevantTechnicalAssetId) > 0 {
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.MostRelevantTechnicalAssetId])
- }
- writeRiskTrackingStatus(risk)
- pdf.SetLeftMargin(oldLeft)
- html.Write(5, text.String())
- text.Reset()
- }
- pdf.SetLeftMargin(oldLeft)
- }
-}
-
-func writeRiskTrackingStatus(risk model.Risk) {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- tracking := risk.GetRiskTracking()
- pdfColorBlack()
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- switch tracking.Status {
- case model.Unchecked:
- colors.ColorRiskStatusUnchecked(pdf)
- case model.InDiscussion:
- colors.ColorRiskStatusInDiscussion(pdf)
- case model.Accepted:
- colors.ColorRiskStatusAccepted(pdf)
- case model.InProgress:
- colors.ColorRiskStatusInProgress(pdf)
- case model.Mitigated:
- colors.ColorRiskStatusMitigated(pdf)
- case model.FalsePositive:
- colors.ColorRiskStatusFalsePositive(pdf)
- default:
- pdfColorBlack()
- }
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- if tracking.Status == model.Unchecked {
- pdf.SetFont("Helvetica", "B", fontSizeSmall)
- }
- pdf.CellFormat(25, 4, tracking.Status.Title(), "0", 0, "B", false, 0, "")
- if tracking.Status != model.Unchecked {
- dateStr := tracking.Date.Format("2006-01-02")
- if dateStr == "0001-01-01" {
- dateStr = ""
- }
- justificationStr := tracking.Justification
- pdfColorGray()
- pdf.CellFormat(20, 4, dateStr, "0", 0, "B", false, 0, "")
- pdf.CellFormat(35, 4, uni(tracking.CheckedBy), "0", 0, "B", false, 0, "")
- pdf.CellFormat(35, 4, uni(tracking.Ticket), "0", 0, "B", false, 0, "")
- pdf.Ln(-1)
- pdfColorBlack()
- pdf.CellFormat(10, 4, "", "0", 0, "", false, 0, "")
- pdf.MultiCell(170, 4, uni(justificationStr), "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- } else {
- pdf.Ln(-1)
- }
- pdfColorBlack()
-}
-
-func createTechnicalAssets() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- // category title
- title := "Identified Risks by Technical Asset"
- pdfColorBlack()
- addHeadline(title, false)
- defineLinkTarget("{intro-risks-by-technical-asset}")
- html := pdf.HTMLBasicNew()
- var text strings.Builder
- text.WriteString("In total " + strconv.Itoa(model.TotalRiskCount()) + " potential risks have been identified during the threat modeling process " +
- "of which " +
- "" + strconv.Itoa(len(model.FilteredByOnlyCriticalRisks())) + " are rated as critical, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyHighRisks())) + " as high, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyElevatedRisks())) + " as elevated, " +
- "" + strconv.Itoa(len(model.FilteredByOnlyMediumRisks())) + " as medium, " +
- "and " + strconv.Itoa(len(model.FilteredByOnlyLowRisks())) + " as low. " +
- "
These risks are distributed across " + strconv.Itoa(len(model.InScopeTechnicalAssets())) + " in-scope technical assets. ")
- text.WriteString("The following sub-chapters of this section describe each identified risk grouped by technical asset. ") // TODO more explanation text
- text.WriteString("The RAA value of a technical asset is the calculated \"Relative Attacker Attractiveness\" value in percent.")
- html.Write(5, text.String())
- text.Reset()
- currentChapterTitleBreadcrumb = title
- for _, technicalAsset := range model.SortedTechnicalAssetsByRiskSeverityAndTitle() {
- risks := technicalAsset.GeneratedRisks()
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- if technicalAsset.OutOfScope {
- pdfColorOutOfScope()
- suffix = "out-of-scope"
- } else {
- switch model.HighestSeverityStillAtRisk(risks) {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
- }
-
- // asset title
- title := technicalAsset.Title + ": " + suffix
- addHeadline(uni(title), true)
- pdfColorBlack()
- defineLinkTarget("{" + technicalAsset.Id + "}")
- currentChapterTitleBreadcrumb = title
-
- // asset description
- html := pdf.HTMLBasicNew()
- var text strings.Builder
- text.WriteString("Description
")
- text.WriteString(uni(technicalAsset.Description))
- html.Write(5, text.String())
- text.Reset()
- pdf.SetTextColor(0, 0, 0)
-
- // and more metadata of asset in tabular view
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.Ln(-1)
- if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdfColorBlack()
- pdf.CellFormat(190, 6, "Identified Risks of Asset", "0", 0, "", false, 0, "")
- pdfColorGray()
- oldLeft, _, _, _ := pdf.GetMargins()
- if len(risks) > 0 {
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- html.Write(5, "Risk finding paragraphs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(15)
- /*
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(185, 6, strconv.Itoa(len(risks))+" risks in total were identified", "0", 0, "", false, 0, "")
- */
- headlineCriticalWritten, headlineHighWritten, headlineElevatedWritten, headlineMediumWritten, headlineLowWritten := false, false, false, false, false
- pdf.Ln(-1)
- for _, risk := range risks {
- text.WriteString("
")
- html.Write(5, text.String())
- text.Reset()
- if pdf.GetY() > 250 { // 250 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- switch risk.Severity {
- case model.CriticalSeverity:
- colors.ColorCriticalRisk(pdf)
- if !headlineCriticalWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft + 3)
- html.Write(5, "
Critical Risk Severity
")
- headlineCriticalWritten = true
- }
- case model.HighSeverity:
- colors.ColorHighRisk(pdf)
- if !headlineHighWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft + 3)
- html.Write(5, "
High Risk Severity
")
- headlineHighWritten = true
- }
- case model.ElevatedSeverity:
- colors.ColorElevatedRisk(pdf)
- if !headlineElevatedWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft + 3)
- html.Write(5, "
Elevated Risk Severity
")
- headlineElevatedWritten = true
- }
- case model.MediumSeverity:
- colors.ColorMediumRisk(pdf)
- if !headlineMediumWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft + 3)
- html.Write(5, "
Medium Risk Severity
")
- headlineMediumWritten = true
- }
- case model.LowSeverity:
- colors.ColorLowRisk(pdf)
- if !headlineLowWritten {
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(oldLeft + 3)
- html.Write(5, "
Low Risk Severity
")
- headlineLowWritten = true
- }
- default:
- pdfColorBlack()
- }
- if !risk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() {
- pdfColorBlack()
- }
- posY := pdf.GetY()
- pdf.SetLeftMargin(oldLeft + 10)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- text.WriteString(uni(risk.Title) + ": Exploitation likelihood is " + risk.ExploitationLikelihood.Title() + " with " + risk.ExploitationImpact.Title() + " impact.")
- text.WriteString("
")
- html.Write(5, text.String())
- text.Reset()
-
- pdf.SetFont("Helvetica", "", fontSizeVerySmall)
- pdfColorGray()
- pdf.MultiCell(215, 5, uni(risk.SyntheticId), "0", "0", false)
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[risk.Category.Id])
- pdf.SetFont("Helvetica", "", fontSizeBody)
- writeRiskTrackingStatus(risk)
- pdf.SetLeftMargin(oldLeft)
- }
- } else {
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdfColorGray()
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetLeftMargin(15)
- text := "No risks were identified."
- if technicalAsset.OutOfScope {
- text = "Asset was defined as out-of-scope."
- }
- html.Write(5, text)
- pdf.Ln(-1)
- }
- pdf.SetLeftMargin(oldLeft)
-
- pdf.Ln(-1)
- pdf.Ln(4)
- if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 6, "Asset Information", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Id, "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Type.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Usage.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "RAA:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- textRAA := fmt.Sprintf("%.0f", technicalAsset.RAA) + " %"
- if technicalAsset.OutOfScope {
- pdfColorGray()
- textRAA = "out-of-scope"
- }
- pdf.MultiCell(145, 6, textRAA, "0", "0", false)
- pdfColorBlack()
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Size:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Size.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Technology:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Technology.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := technicalAsset.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Internet:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Internet), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Machine:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Machine.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Encryption:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, technicalAsset.Encryption.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Multi-Tenant:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.MultiTenant), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Redundant:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.Redundant), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Custom-Developed:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.CustomDevelopedParts), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Client by Human:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, strconv.FormatBool(technicalAsset.UsedAsClientByHuman), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Data Processed:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsProcessedText := ""
- for _, dataAsset := range technicalAsset.DataAssetsProcessedSorted() {
- if len(dataAssetsProcessedText) > 0 {
- dataAssetsProcessedText += ", "
- }
- dataAssetsProcessedText += dataAsset.Title
- }
- if len(dataAssetsProcessedText) == 0 {
- pdfColorGray()
- dataAssetsProcessedText = "none"
- }
- pdf.MultiCell(145, 6, uni(dataAssetsProcessedText), "0", "0", false)
-
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Data Stored:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsStoredText := ""
- for _, dataAsset := range technicalAsset.DataAssetsStoredSorted() {
- if len(dataAssetsStoredText) > 0 {
- dataAssetsStoredText += ", "
- }
- dataAssetsStoredText += dataAsset.Title
- }
- if len(dataAssetsStoredText) == 0 {
- pdfColorGray()
- dataAssetsStoredText = "none"
- }
- pdf.MultiCell(145, 6, uni(dataAssetsStoredText), "0", "0", false)
-
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Formats Accepted:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- formatsAcceptedText := ""
- for _, formatAccepted := range technicalAsset.DataFormatsAcceptedSorted() {
- if len(formatsAcceptedText) > 0 {
- formatsAcceptedText += ", "
- }
- formatsAcceptedText += formatAccepted.Title()
- }
- if len(formatsAcceptedText) == 0 {
- pdfColorGray()
- formatsAcceptedText = "none of the special data formats accepted"
- }
- pdf.MultiCell(145, 6, formatsAcceptedText, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.Ln(4)
- if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 6, "Asset Rating", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, uni(technicalAsset.Owner), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, technicalAsset.Confidentiality.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, technicalAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, technicalAsset.Integrity.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, technicalAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, technicalAsset.Availability.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, technicalAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, uni(technicalAsset.JustificationCiaRating), "0", "0", false)
-
- if technicalAsset.OutOfScope {
- pdf.Ln(-1)
- pdf.Ln(4)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 6, "Asset Out-of-Scope Justification", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.MultiCell(190, 6, uni(technicalAsset.JustificationOutOfScope), "0", "0", false)
- pdf.Ln(-1)
- }
- pdf.Ln(-1)
-
- if len(technicalAsset.CommunicationLinks) > 0 {
- pdf.Ln(-1)
- if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 6, "Outgoing Communication Links: "+strconv.Itoa(len(technicalAsset.CommunicationLinks)), "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Target technical asset names are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- for _, outgoingCommLink := range technicalAsset.CommunicationLinksSorted() {
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(185, 6, uni(outgoingCommLink.Title)+" (outgoing)", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.MultiCell(185, 6, uni(outgoingCommLink.Description), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdf.Ln(-1)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Target:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(125, 6, uni(model.ParsedModelRoot.TechnicalAssets[outgoingCommLink.TargetId].Title), "0", "0", false)
- pdf.Link(60, pdf.GetY()-5, 70, 5, tocLinkIdByAssetId[outgoingCommLink.TargetId])
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, outgoingCommLink.Protocol.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Protocol.IsEncrypted()), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, outgoingCommLink.Authentication.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, outgoingCommLink.Authorization.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.Readonly), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, outgoingCommLink.Usage.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := outgoingCommLink.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.VPN), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(outgoingCommLink.IpFiltered), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsSentText := ""
- for _, dataAsset := range outgoingCommLink.DataAssetsSentSorted() {
- if len(dataAssetsSentText) > 0 {
- dataAssetsSentText += ", "
- }
- dataAssetsSentText += dataAsset.Title
- }
- if len(dataAssetsSentText) == 0 {
- pdfColorGray()
- dataAssetsSentText = "none"
- }
- pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsReceivedText := ""
- for _, dataAsset := range outgoingCommLink.DataAssetsReceivedSorted() {
- if len(dataAssetsReceivedText) > 0 {
- dataAssetsReceivedText += ", "
- }
- dataAssetsReceivedText += dataAsset.Title
- }
- if len(dataAssetsReceivedText) == 0 {
- pdfColorGray()
- dataAssetsReceivedText = "none"
- }
- pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false)
- pdf.Ln(-1)
- }
- }
-
- incomingCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- if len(incomingCommLinks) > 0 {
- pdf.Ln(-1)
- if pdf.GetY() > 260 { // 260 only for major titles (to avoid "Schusterjungen"), for the rest attributes 270
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 6, "Incoming Communication Links: "+strconv.Itoa(len(incomingCommLinks)), "0", 0, "", false, 0, "")
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Source technical asset names are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.Ln(-1)
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- for _, incomingCommLink := range incomingCommLinks {
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorBlack()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(185, 6, uni(incomingCommLink.Title)+" (incoming)", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.MultiCell(185, 6, uni(incomingCommLink.Description), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdf.Ln(-1)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Source:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, uni(model.ParsedModelRoot.TechnicalAssets[incomingCommLink.SourceId].Title), "0", "0", false)
- pdf.Link(60, pdf.GetY()-5, 70, 5, tocLinkIdByAssetId[incomingCommLink.SourceId])
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Protocol:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, incomingCommLink.Protocol.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Encrypted:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Protocol.IsEncrypted()), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Authentication:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, incomingCommLink.Authentication.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Authorization:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, incomingCommLink.Authorization.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Read-Only:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.Readonly), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Usage:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, incomingCommLink.Usage.String(), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := incomingCommLink.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(140, 6, uni(tagsUsedText), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "VPN:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.VPN), "0", "0", false)
- if pdf.GetY() > 270 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "IP-Filtered:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(140, 6, strconv.FormatBool(incomingCommLink.IpFiltered), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Data Received:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsSentText := ""
- // yep, here we reverse the sent/received direction, as it's the incoming stuff
- for _, dataAsset := range incomingCommLink.DataAssetsSentSorted() {
- if len(dataAssetsSentText) > 0 {
- dataAssetsSentText += ", "
- }
- dataAssetsSentText += dataAsset.Title
- }
- if len(dataAssetsSentText) == 0 {
- pdfColorGray()
- dataAssetsSentText = "none"
- }
- pdf.MultiCell(140, 6, uni(dataAssetsSentText), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(15, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(35, 6, "Data Sent:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- dataAssetsReceivedText := ""
- // yep, here we reverse the sent/received direction, as it's the incoming stuff
- for _, dataAsset := range incomingCommLink.DataAssetsReceivedSorted() {
- if len(dataAssetsReceivedText) > 0 {
- dataAssetsReceivedText += ", "
- }
- dataAssetsReceivedText += dataAsset.Title
- }
- if len(dataAssetsReceivedText) == 0 {
- pdfColorGray()
- dataAssetsReceivedText = "none"
- }
- pdf.MultiCell(140, 6, uni(dataAssetsReceivedText), "0", "0", false)
- pdf.Ln(-1)
- }
- }
- }
-}
-
-func createDataAssets() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- title := "Identified Data Breach Probabilities by Data Asset"
- pdfColorBlack()
- addHeadline(title, false)
- defineLinkTarget("{intro-risks-by-data-asset}")
- html := pdf.HTMLBasicNew()
- html.Write(5, "In total "+strconv.Itoa(model.TotalRiskCount())+" potential risks have been identified during the threat modeling process "+
- "of which "+
- ""+strconv.Itoa(len(model.FilteredByOnlyCriticalRisks()))+" are rated as critical, "+
- ""+strconv.Itoa(len(model.FilteredByOnlyHighRisks()))+" as high, "+
- ""+strconv.Itoa(len(model.FilteredByOnlyElevatedRisks()))+" as elevated, "+
- ""+strconv.Itoa(len(model.FilteredByOnlyMediumRisks()))+" as medium, "+
- "and "+strconv.Itoa(len(model.FilteredByOnlyLowRisks()))+" as low. "+
- "
These risks are distributed across "+strconv.Itoa(len(model.ParsedModelRoot.DataAssets))+" data assets. ")
- html.Write(5, "The following sub-chapters of this section describe the derived data breach probabilities grouped by data asset.
") // TODO more explanation text
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdfColorGray()
- html.Write(5, "Technical asset names and risk IDs are clickable and link to the corresponding chapter.")
- pdf.SetFont("Helvetica", "", fontSizeBody)
- currentChapterTitleBreadcrumb = title
- for _, dataAsset := range model.SortedDataAssetsByDataBreachProbabilityAndTitle() {
- if pdf.GetY() > 280 { // 280 as only small font previously (not 250)
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- pdfColorBlack()
- switch dataAsset.IdentifiedDataBreachProbabilityStillAtRisk() {
- case model.Probable:
- colors.ColorHighRisk(pdf)
- case model.Possible:
- colors.ColorMediumRisk(pdf)
- case model.Improbable:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if !dataAsset.IsDataBreachPotentialStillAtRisk() {
- pdfColorBlack()
- }
- risks := dataAsset.IdentifiedDataBreachProbabilityRisks()
- countStillAtRisk := len(model.ReduceToOnlyStillAtRisk(risks))
- suffix := strconv.Itoa(countStillAtRisk) + " / " + strconv.Itoa(len(risks)) + " Risk"
- if len(risks) != 1 {
- suffix += "s"
- }
- title := uni(dataAsset.Title) + ": " + suffix
- addHeadline(title, true)
- defineLinkTarget("{data:" + dataAsset.Id + "}")
- pdfColorBlack()
- html.Write(5, uni(dataAsset.Description))
- html.Write(5, "
")
-
- pdf.SetFont("Helvetica", "", fontSizeBody)
- /*
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Indirect Breach:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- probability := dataAsset.IdentifiedDataBreachProbability()
- dataBreachText := probability.String()
- switch probability {
- case model.Probable:
- colors.ColorHighRisk(pdf)
- case model.Possible:
- colors.ColorMediumRisk(pdf)
- case model.Improbable:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if !dataAsset.IsDataBreachPotentialStillAtRisk() {
- pdfColorBlack()
- dataBreachText = "none"
- }
- pdf.MultiCell(145, 6, dataBreachText, "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- */
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, dataAsset.Id, "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Usage:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, dataAsset.Usage.String(), "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Quantity:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, dataAsset.Quantity.String(), "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := dataAsset.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Origin:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, uni(dataAsset.Origin), "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Owner:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, uni(dataAsset.Owner), "0", "0", false)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Confidentiality:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, dataAsset.Confidentiality.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, dataAsset.Confidentiality.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Integrity:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, dataAsset.Integrity.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, dataAsset.Integrity.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Availability:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.CellFormat(40, 6, dataAsset.Availability.String(), "0", 0, "", false, 0, "")
- pdfColorGray()
- pdf.CellFormat(115, 6, dataAsset.Availability.RatingStringInScale(), "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.Ln(-1)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "CIA-Justification:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, uni(dataAsset.JustificationCiaRating), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Processed by:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- processedByText := ""
- for _, dataAsset := range dataAsset.ProcessedByTechnicalAssetsSorted() {
- if len(processedByText) > 0 {
- processedByText += ", "
- }
- processedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back
- }
- if len(processedByText) == 0 {
- pdfColorGray()
- processedByText = "none"
- }
- pdf.MultiCell(145, 6, uni(processedByText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Stored by:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- storedByText := ""
- for _, dataAsset := range dataAsset.StoredByTechnicalAssetsSorted() {
- if len(storedByText) > 0 {
- storedByText += ", "
- }
- storedByText += dataAsset.Title // TODO add link to technical asset detail chapter and back
- }
- if len(storedByText) == 0 {
- pdfColorGray()
- storedByText = "none"
- }
- pdf.MultiCell(145, 6, uni(storedByText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Sent via:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- sentViaText := ""
- for _, commLink := range dataAsset.SentViaCommLinksSorted() {
- if len(sentViaText) > 0 {
- sentViaText += ", "
- }
- sentViaText += commLink.Title // TODO add link to technical asset detail chapter and back
- }
- if len(sentViaText) == 0 {
- pdfColorGray()
- sentViaText = "none"
- }
- pdf.MultiCell(145, 6, uni(sentViaText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Received via:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- receivedViaText := ""
- for _, commLink := range dataAsset.ReceivedViaCommLinksSorted() {
- if len(receivedViaText) > 0 {
- receivedViaText += ", "
- }
- receivedViaText += commLink.Title // TODO add link to technical asset detail chapter and back
- }
- if len(receivedViaText) == 0 {
- pdfColorGray()
- receivedViaText = "none"
- }
- pdf.MultiCell(145, 6, uni(receivedViaText), "0", "0", false)
-
- /*
- // where is this data asset at risk (i.e. why)
- risksByTechAssetId := dataAsset.IdentifiedRisksByResponsibleTechnicalAssetId()
- techAssetsResponsible := make([]model.TechnicalAsset, 0)
- for techAssetId, _ := range risksByTechAssetId {
- techAssetsResponsible = append(techAssetsResponsible, model.ParsedModelRoot.TechnicalAssets[techAssetId])
- }
- sort.Sort(model.ByTechnicalAssetRiskSeverityAndTitleSortStillAtRisk(techAssetsResponsible))
- assetStr := "assets"
- if len(techAssetsResponsible) == 1 {
- assetStr = "asset"
- }
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Risk via:", "0", 0, "", false, 0, "")
- if len(techAssetsResponsible) == 0 {
- pdfColorGray()
- pdf.MultiCell(145, 6, "This data asset is not directly at risk via any technical asset.", "0", "0", false)
- } else {
- pdfColorBlack()
- pdf.MultiCell(145, 6, "This data asset is at direct risk via "+strconv.Itoa(len(techAssetsResponsible))+" technical "+assetStr+":", "0", "0", false)
- for _, techAssetResponsible := range techAssetsResponsible {
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- switch model.HighestSeverityStillAtRisk(techAssetResponsible.GeneratedRisks()) {
- case model.High:
- colors.ColorHighRisk(pdf)
- case model.Medium:
- colors.ColorMediumRisk(pdf)
- case model.Low:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- risks := techAssetResponsible.GeneratedRisks()
- if len(model.ReduceToOnlyStillAtRisk(risks)) == 0 {
- pdfColorBlack()
- }
- riskStr := "risks"
- if len(risks) == 1 {
- riskStr = "risk"
- }
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- posY := pdf.GetY()
- risksResponsible := techAssetResponsible.GeneratedRisks()
- risksResponsibleStillAtRisk := model.ReduceToOnlyStillAtRisk(risksResponsible)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.MultiCell(185, 6, uni(techAssetResponsible.Title)+": "+strconv.Itoa(len(risksResponsibleStillAtRisk))+" / "+strconv.Itoa(len(risksResponsible))+" "+riskStr, "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[techAssetResponsible.Id])
- }
- pdfColorBlack()
- }
- */
-
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Data Breach:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- dataBreachProbability := dataAsset.IdentifiedDataBreachProbabilityStillAtRisk()
- riskText := dataBreachProbability.String()
- switch dataBreachProbability {
- case model.Probable:
- colors.ColorHighRisk(pdf)
- case model.Possible:
- colors.ColorMediumRisk(pdf)
- case model.Improbable:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if !dataAsset.IsDataBreachPotentialStillAtRisk() {
- pdfColorBlack()
- riskText = "none"
- }
- pdf.MultiCell(145, 6, riskText, "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
-
- // how can is this data asset be indirectly lost (i.e. why)
- dataBreachRisksStillAtRisk := dataAsset.IdentifiedDataBreachProbabilityRisksStillAtRisk()
- sort.Sort(model.ByDataBreachProbabilitySort(dataBreachRisksStillAtRisk))
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Data Breach Risks:", "0", 0, "", false, 0, "")
- if len(dataBreachRisksStillAtRisk) == 0 {
- pdfColorGray()
- pdf.MultiCell(145, 6, "This data asset has no data breach potential.", "0", "0", false)
- } else {
- pdfColorBlack()
- riskRemainingStr := "risks"
- if countStillAtRisk == 1 {
- riskRemainingStr = "risk"
- }
- pdf.MultiCell(145, 6, "This data asset has data breach potential because of "+
- ""+strconv.Itoa(countStillAtRisk)+" remaining "+riskRemainingStr+":", "0", "0", false)
- for _, dataBreachRisk := range dataBreachRisksStillAtRisk {
- if pdf.GetY() > 280 { // 280 as only small font here
- pageBreak()
- pdf.SetY(36)
- }
- switch dataBreachRisk.DataBreachProbability {
- case model.Probable:
- colors.ColorHighRisk(pdf)
- case model.Possible:
- colors.ColorMediumRisk(pdf)
- case model.Improbable:
- colors.ColorLowRisk(pdf)
- default:
- pdfColorBlack()
- }
- if !dataBreachRisk.GetRiskTrackingStatusDefaultingUnchecked().IsStillAtRisk() {
- pdfColorBlack()
- }
- pdf.CellFormat(10, 6, "", "0", 0, "", false, 0, "")
- posY := pdf.GetY()
- pdf.SetFont("Helvetica", "", fontSizeVerySmall)
- pdf.MultiCell(185, 5, dataBreachRisk.DataBreachProbability.Title()+": "+uni(dataBreachRisk.SyntheticId), "0", "0", false)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.Link(20, posY, 180, pdf.GetY()-posY, tocLinkIdByAssetId[dataBreachRisk.Category.Id])
- }
- pdfColorBlack()
- }
- }
-}
-
-func createTrustBoundaries() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- title := "Trust Boundaries"
- pdfColorBlack()
- addHeadline(title, false)
-
- html := pdf.HTMLBasicNew()
- word := "has"
- if len(model.ParsedModelRoot.TrustBoundaries) > 1 {
- word = "have"
- }
- html.Write(5, "In total "+strconv.Itoa(len(model.ParsedModelRoot.TrustBoundaries))+" trust boundaries "+word+" been "+
- "modeled during the threat modeling process.")
- currentChapterTitleBreadcrumb = title
- for _, trustBoundary := range model.SortedTrustBoundariesByTitle() {
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- colors.ColorTwilight(pdf)
- if !trustBoundary.Type.IsNetworkBoundary() {
- pdfColorLightGray()
- }
- html.Write(5, ""+uni(trustBoundary.Title)+"
")
- defineLinkTarget("{boundary:" + trustBoundary.Id + "}")
- html.Write(5, uni(trustBoundary.Description))
- html.Write(5, "
")
-
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, trustBoundary.Id, "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Type:", "0", 0, "", false, 0, "")
- colors.ColorTwilight(pdf)
- if !trustBoundary.Type.IsNetworkBoundary() {
- pdfColorLightGray()
- }
- pdf.MultiCell(145, 6, trustBoundary.Type.String(), "0", "0", false)
- pdfColorBlack()
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := trustBoundary.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Assets inside:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- assetsInsideText := ""
- for _, assetKey := range trustBoundary.TechnicalAssetsInside {
- if len(assetsInsideText) > 0 {
- assetsInsideText += ", "
- }
- assetsInsideText += model.ParsedModelRoot.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back
- }
- if len(assetsInsideText) == 0 {
- pdfColorGray()
- assetsInsideText = "none"
- }
- pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Boundaries nested:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- boundariesNestedText := ""
- for _, assetKey := range trustBoundary.TrustBoundariesNested {
- if len(boundariesNestedText) > 0 {
- boundariesNestedText += ", "
- }
- boundariesNestedText += model.ParsedModelRoot.TrustBoundaries[assetKey].Title
- }
- if len(boundariesNestedText) == 0 {
- pdfColorGray()
- boundariesNestedText = "none"
- }
- pdf.MultiCell(145, 6, uni(boundariesNestedText), "0", "0", false)
- }
-}
-
-func createSharedRuntimes() {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- title := "Shared Runtimes"
- pdfColorBlack()
- addHeadline(title, false)
-
- html := pdf.HTMLBasicNew()
- word, runtime := "has", "runtime"
- if len(model.ParsedModelRoot.SharedRuntimes) > 1 {
- word, runtime = "have", "runtimes"
- }
- html.Write(5, "In total "+strconv.Itoa(len(model.ParsedModelRoot.SharedRuntimes))+" shared "+runtime+" "+word+" been "+
- "modeled during the threat modeling process.")
- currentChapterTitleBreadcrumb = title
- for _, sharedRuntime := range model.SortedSharedRuntimesByTitle() {
- pdfColorBlack()
- if pdf.GetY() > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- html.Write(5, "
")
- }
- html.Write(5, ""+uni(sharedRuntime.Title)+"
")
- defineLinkTarget("{runtime:" + sharedRuntime.Id + "}")
- html.Write(5, uni(sharedRuntime.Description))
- html.Write(5, "
")
-
- pdf.SetFont("Helvetica", "", fontSizeBody)
-
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "ID:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(145, 6, sharedRuntime.Id, "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Tags:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- tagsUsedText := ""
- sorted := sharedRuntime.Tags
- sort.Strings(sorted)
- for _, tag := range sorted {
- if len(tagsUsedText) > 0 {
- tagsUsedText += ", "
- }
- tagsUsedText += tag
- }
- if len(tagsUsedText) == 0 {
- pdfColorGray()
- tagsUsedText = "none"
- }
- pdf.MultiCell(145, 6, uni(tagsUsedText), "0", "0", false)
-
- if pdf.GetY() > 265 {
- pageBreak()
- pdf.SetY(36)
- }
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(40, 6, "Assets running:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- assetsInsideText := ""
- for _, assetKey := range sharedRuntime.TechnicalAssetsRunning {
- if len(assetsInsideText) > 0 {
- assetsInsideText += ", "
- }
- assetsInsideText += model.ParsedModelRoot.TechnicalAssets[assetKey].Title // TODO add link to technical asset detail chapter and back
- }
- if len(assetsInsideText) == 0 {
- pdfColorGray()
- assetsInsideText = "none"
- }
- pdf.MultiCell(145, 6, uni(assetsInsideText), "0", "0", false)
- }
-}
-
-func createRiskRulesChecked(modelFilename string, skipRiskRules string, buildTimestamp string, modelHash string, customRiskRules map[string]model.CustomRiskRule) {
- pdf.SetTextColor(0, 0, 0)
- title := "Risk Rules Checked by Threagile"
- addHeadline(title, false)
- defineLinkTarget("{risk-rules-checked}")
- currentChapterTitleBreadcrumb = title
-
- html := pdf.HTMLBasicNew()
- var strBuilder strings.Builder
- pdfColorGray()
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- timestamp := time.Now()
- strBuilder.WriteString("Threagile Version: " + model.ThreagileVersion)
- strBuilder.WriteString("
Threagile Build Timestamp: " + buildTimestamp)
- strBuilder.WriteString("
Threagile Execution Timestamp: " + timestamp.Format("20060102150405"))
- strBuilder.WriteString("
Model Filename: " + modelFilename)
- strBuilder.WriteString("
Model Hash (SHA256): " + modelHash)
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
- pdfColorBlack()
- pdf.SetFont("Helvetica", "", fontSizeBody)
- strBuilder.WriteString("
Threagile (see https://threagile.io for more details) is an open-source toolkit for agile threat modeling, created by Christian Schneider (https://christian-schneider.net): It allows to model an architecture with its assets in an agile fashion as a YAML file " +
- "directly inside the IDE. Upon execution of the Threagile toolkit all standard risk rules (as well as individual custom rules if present) " +
- "are checked against the architecture model. At the time the Threagile toolkit was executed on the model input file " +
- "the following risk rules were checked:")
- html.Write(5, strBuilder.String())
- strBuilder.Reset()
-
- // TODO use the new plugin system to discover risk rules instead of hard-coding them here:
- skippedRules := strings.Split(skipRiskRules, ",")
- skipped := ""
- pdf.Ln(-1)
-
- for id, customRule := range customRiskRules {
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+customRule.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "I", fontSizeBody)
- pdf.CellFormat(190, 6, "Custom Risk Rule", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, customRule.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(customRule.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, customRule.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, customRule.Category().RiskAssessment, "0", "0", false)
- }
-
- for _, key := range model.SortedKeysOfIndividualRiskCategories() {
- indivRiskCat := model.ParsedModelRoot.IndividualRiskCategories[key]
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- pdf.CellFormat(190, 3, indivRiskCat.Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, indivRiskCat.Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "I", fontSizeBody)
- pdf.CellFormat(190, 6, "Individual Risk Category", "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, indivRiskCat.STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(indivRiskCat.Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, indivRiskCat.DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, indivRiskCat.RiskAssessment, "0", "0", false)
- }
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, accidental_secret_leak.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+accidental_secret_leak.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, accidental_secret_leak.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, accidental_secret_leak.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(accidental_secret_leak.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, accidental_secret_leak.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, accidental_secret_leak.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, code_backdooring.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+code_backdooring.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, code_backdooring.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, code_backdooring.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(code_backdooring.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, code_backdooring.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, code_backdooring.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, container_baseimage_backdooring.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+container_baseimage_backdooring.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, container_baseimage_backdooring.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_baseimage_backdooring.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(container_baseimage_backdooring.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_baseimage_backdooring.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_baseimage_backdooring.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, container_platform_escape.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+container_platform_escape.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, container_platform_escape.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_platform_escape.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(container_platform_escape.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_platform_escape.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, container_platform_escape.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, cross_site_request_forgery.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+cross_site_request_forgery.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, cross_site_request_forgery.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_request_forgery.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(cross_site_request_forgery.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_request_forgery.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_request_forgery.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, cross_site_scripting.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+cross_site_scripting.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, cross_site_scripting.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_scripting.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(cross_site_scripting.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_scripting.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, cross_site_scripting.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, dos_risky_access_across_trust_boundary.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+dos_risky_access_across_trust_boundary.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, dos_risky_access_across_trust_boundary.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(dos_risky_access_across_trust_boundary.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, dos_risky_access_across_trust_boundary.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, incomplete_model.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+incomplete_model.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, incomplete_model.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, incomplete_model.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(incomplete_model.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, incomplete_model.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, incomplete_model.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, ldap_injection.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+ldap_injection.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, ldap_injection.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, ldap_injection.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(ldap_injection.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, ldap_injection.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, ldap_injection.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_authentication.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_authentication.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_authentication.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_authentication.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_authentication_second_factor.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_authentication_second_factor.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_authentication_second_factor.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication_second_factor.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_authentication_second_factor.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication_second_factor.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_authentication_second_factor.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_build_infrastructure.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_build_infrastructure.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_build_infrastructure.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_build_infrastructure.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_build_infrastructure.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_build_infrastructure.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_build_infrastructure.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_cloud_hardening.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_cloud_hardening.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_cloud_hardening.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_cloud_hardening.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_cloud_hardening.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_cloud_hardening.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_cloud_hardening.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_file_validation.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_file_validation.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_file_validation.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_file_validation.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_file_validation.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_file_validation.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_file_validation.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_hardening.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_hardening.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_hardening.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_hardening.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_hardening.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_hardening.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_hardening.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_identity_propagation.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_identity_propagation.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_identity_propagation.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_propagation.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_identity_propagation.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_propagation.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_propagation.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_identity_provider_isolation.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_identity_provider_isolation.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_identity_provider_isolation.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_identity_provider_isolation.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_provider_isolation.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_identity_store.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_identity_store.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_identity_store.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_store.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_identity_store.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_store.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_identity_store.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_network_segmentation.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_network_segmentation.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_network_segmentation.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_network_segmentation.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_network_segmentation.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_network_segmentation.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_network_segmentation.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_vault.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_vault.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_vault.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_vault.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_vault_isolation.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_vault_isolation.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_vault_isolation.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault_isolation.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_vault_isolation.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault_isolation.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_vault_isolation.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, missing_waf.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+missing_waf.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, missing_waf.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_waf.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(missing_waf.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_waf.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, missing_waf.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, mixed_targets_on_shared_runtime.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+mixed_targets_on_shared_runtime.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, mixed_targets_on_shared_runtime.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(mixed_targets_on_shared_runtime.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, mixed_targets_on_shared_runtime.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, path_traversal.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+path_traversal.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, path_traversal.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, path_traversal.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(path_traversal.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, path_traversal.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, path_traversal.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, push_instead_of_pull_deployment.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+push_instead_of_pull_deployment.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, push_instead_of_pull_deployment.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(push_instead_of_pull_deployment.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, push_instead_of_pull_deployment.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, search_query_injection.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+search_query_injection.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, search_query_injection.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, search_query_injection.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(search_query_injection.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, search_query_injection.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, search_query_injection.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, server_side_request_forgery.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+server_side_request_forgery.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, server_side_request_forgery.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, server_side_request_forgery.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(server_side_request_forgery.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, server_side_request_forgery.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, server_side_request_forgery.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, service_registry_poisoning.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+service_registry_poisoning.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, service_registry_poisoning.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, service_registry_poisoning.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(service_registry_poisoning.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, service_registry_poisoning.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, service_registry_poisoning.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, sql_nosql_injection.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+sql_nosql_injection.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, sql_nosql_injection.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, sql_nosql_injection.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(sql_nosql_injection.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, sql_nosql_injection.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, sql_nosql_injection.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unchecked_deployment.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unchecked_deployment.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unchecked_deployment.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unchecked_deployment.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unchecked_deployment.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unchecked_deployment.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unchecked_deployment.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unencrypted_asset.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unencrypted_asset.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unencrypted_asset.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_asset.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unencrypted_asset.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_asset.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_asset.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unencrypted_communication.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unencrypted_communication.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unencrypted_communication.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_communication.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unencrypted_communication.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_communication.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unencrypted_communication.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unguarded_access_from_internet.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unguarded_access_from_internet.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unguarded_access_from_internet.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unguarded_access_from_internet.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_access_from_internet.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unguarded_direct_datastore_access.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unguarded_direct_datastore_access.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unguarded_direct_datastore_access.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unguarded_direct_datastore_access.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unguarded_direct_datastore_access.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unnecessary_communication_link.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unnecessary_communication_link.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unnecessary_communication_link.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_communication_link.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unnecessary_communication_link.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_communication_link.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_communication_link.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unnecessary_data_asset.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unnecessary_data_asset.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unnecessary_data_asset.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_asset.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unnecessary_data_asset.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_asset.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_asset.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unnecessary_data_transfer.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unnecessary_data_transfer.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unnecessary_data_transfer.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unnecessary_data_transfer.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_data_transfer.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, unnecessary_technical_asset.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+unnecessary_technical_asset.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, unnecessary_technical_asset.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(unnecessary_technical_asset.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, unnecessary_technical_asset.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, untrusted_deserialization.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+untrusted_deserialization.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, untrusted_deserialization.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, untrusted_deserialization.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(untrusted_deserialization.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, untrusted_deserialization.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, untrusted_deserialization.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, wrong_communication_link_content.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+wrong_communication_link_content.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, wrong_communication_link_content.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_communication_link_content.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(wrong_communication_link_content.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_communication_link_content.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_communication_link_content.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, wrong_trust_boundary_content.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+wrong_trust_boundary_content.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, wrong_trust_boundary_content.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(wrong_trust_boundary_content.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, wrong_trust_boundary_content.Category().RiskAssessment, "0", "0", false)
-
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "B", fontSizeBody)
- if model.Contains(skippedRules, xml_external_entity.Category().Id) {
- skipped = "SKIPPED - "
- } else {
- skipped = ""
- }
- pdf.CellFormat(190, 3, skipped+xml_external_entity.Category().Title, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeSmall)
- pdf.CellFormat(190, 6, xml_external_entity.Category().Id, "0", 0, "", false, 0, "")
- pdf.Ln(-1)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "STRIDE:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, xml_external_entity.Category().STRIDE.Title(), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Description:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, firstParagraph(xml_external_entity.Category().Description), "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Detection:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, xml_external_entity.Category().DetectionLogic, "0", "0", false)
- pdfColorGray()
- pdf.CellFormat(5, 6, "", "0", 0, "", false, 0, "")
- pdf.CellFormat(25, 6, "Rating:", "0", 0, "", false, 0, "")
- pdfColorBlack()
- pdf.MultiCell(160, 6, xml_external_entity.Category().RiskAssessment, "0", "0", false)
-}
-
-func createTargetDescription(baseFolder string) {
- uni := pdf.UnicodeTranslatorFromDescriptor("")
- pdf.SetTextColor(0, 0, 0)
- title := "Application Overview"
- addHeadline(title, false)
- defineLinkTarget("{target-overview}")
- currentChapterTitleBreadcrumb = title
-
- var intro strings.Builder
- html := pdf.HTMLBasicNew()
-
- intro.WriteString("Business Criticality
")
- intro.WriteString("The overall business criticality of \"" + uni(model.ParsedModelRoot.Title) + "\" was rated as:
")
- html.Write(5, intro.String())
- criticality := model.ParsedModelRoot.BusinessCriticality
- intro.Reset()
- pdfColorGray()
- intro.WriteString("( ")
- if criticality == model.Archive {
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
- intro.WriteString("" + strings.ToUpper(model.Archive.String()) + "")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorGray()
- } else {
- intro.WriteString(model.Archive.String())
- }
- intro.WriteString(" | ")
- if criticality == model.Operational {
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
- intro.WriteString("" + strings.ToUpper(model.Operational.String()) + "")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorGray()
- } else {
- intro.WriteString(model.Operational.String())
- }
- intro.WriteString(" | ")
- if criticality == model.Important {
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
- intro.WriteString("" + strings.ToUpper(model.Important.String()) + "")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorGray()
- } else {
- intro.WriteString(model.Important.String())
- }
- intro.WriteString(" | ")
- if criticality == model.Critical {
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
- intro.WriteString("" + strings.ToUpper(model.Critical.String()) + "")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorGray()
- } else {
- intro.WriteString(model.Critical.String())
- }
- intro.WriteString(" | ")
- if criticality == model.MissionCritical {
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
- intro.WriteString("" + strings.ToUpper(model.MissionCritical.String()) + "")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorGray()
- } else {
- intro.WriteString(model.MissionCritical.String())
- }
- intro.WriteString(" )")
- html.Write(5, intro.String())
- intro.Reset()
- pdfColorBlack()
-
- intro.WriteString("
Business Overview
")
- intro.WriteString(uni(model.ParsedModelRoot.BusinessOverview.Description))
- html.Write(5, intro.String())
- intro.Reset()
- addCustomImages(model.ParsedModelRoot.BusinessOverview.Images, baseFolder, html)
-
- intro.WriteString("
Technical Overview
")
- intro.WriteString(uni(model.ParsedModelRoot.TechnicalOverview.Description))
- html.Write(5, intro.String())
- intro.Reset()
- addCustomImages(model.ParsedModelRoot.TechnicalOverview.Images, baseFolder, html)
-}
-
-func addCustomImages(customImages []map[string]string, baseFolder string, html gofpdf.HTMLBasicType) {
- var text strings.Builder
- for _, customImage := range customImages {
- for imageFilename := range customImage {
- imageFilenameWithoutPath := filepath.Base(imageFilename)
- // check JPEG, PNG or GIF
- extension := strings.ToLower(filepath.Ext(imageFilenameWithoutPath))
- if extension == ".jpeg" || extension == ".jpg" || extension == ".png" || extension == ".gif" {
- imageFullFilename := baseFolder + "/" + imageFilenameWithoutPath
- if pdf.GetY()+getHeightWhenWidthIsFix(imageFullFilename, 180) > 250 {
- pageBreak()
- pdf.SetY(36)
- } else {
- text.WriteString("
")
- }
- text.WriteString(customImage[imageFilename] + ":
")
- html.Write(5, text.String())
- text.Reset()
-
- var options gofpdf.ImageOptions
- options.ImageType = ""
- pdf.RegisterImage(imageFullFilename, "")
- pdf.ImageOptions(imageFullFilename, 15, pdf.GetY()+50, 170, 0, true, options, 0, "")
- } else {
- log.Print("Ignoring custom image file: ", imageFilenameWithoutPath)
- }
- }
- }
-}
-
-// fileExists checks if a file exists and is not a directory before we
-// try using it to prevent further errors.
-func fileExists(filename string) bool {
- info, err := os.Stat(filename)
- if os.IsNotExist(err) {
- return false
- }
- return !info.IsDir()
-}
-
-func getHeightWhenWidthIsFix(imageFullFilename string, width float64) float64 {
- if !fileExists(imageFullFilename) {
- panic(errors.New("Image file does not exist (or is not readable as file): " + filepath.Base(imageFullFilename)))
- }
- /* #nosec imageFullFilename is not tainted (see caller restricting it to image files of model folder only) */
- file, err := os.Open(imageFullFilename)
- defer file.Close()
- checkErr(err)
- image, _, err := image.DecodeConfig(file)
- checkErr(err)
- return float64(image.Height) / (float64(image.Width) / width)
-}
-
-func embedDataFlowDiagram(diagramFilenamePNG string) {
- pdf.SetTextColor(0, 0, 0)
- title := "Data-Flow Diagram"
- addHeadline(title, false)
- defineLinkTarget("{data-flow-diagram}")
- currentChapterTitleBreadcrumb = title
-
- var intro strings.Builder
- intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " +
- "overview of the data-flow between technical assets. " +
- "The RAA value is the calculated Relative Attacker Attractiveness in percent. " +
- "For a full high-resolution version of this diagram please refer to the PNG image file alongside this report.")
-
- html := pdf.HTMLBasicNew()
- html.Write(5, intro.String())
-
- // check to rotate the image if it is wider than high
- /* #nosec diagramFilenamePNG is not tainted */
- imagePath, _ := os.Open(diagramFilenamePNG)
- defer imagePath.Close()
- srcImage, _, _ := image.Decode(imagePath)
- srcDimensions := srcImage.Bounds()
- // wider than high?
- muchWiderThanHigh := srcDimensions.Dx() > int(float64(srcDimensions.Dy())*1.25)
- // fresh page (eventually landscape)?
- isLandscapePage = false
- /*
- pinnedWidth, pinnedHeight := 190.0, 210.0
- if dataFlowDiagramFullscreen {
- pinnedHeight = 235.0
- if muchWiderThanHigh {
- if allowedPdfLandscapePages {
- pinnedWidth = 275.0
- isLandscapePage = true
- pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4"))
- } else {
- // so rotate the image left by 90 degrees
- // ok, use temp PNG then
- // now rotate left by 90 degrees
- rotatedFile, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png")
- checkErr(err)
- defer os.Remove(rotatedFile.Name())
- dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx()))
- err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0})
- checkErr(err)
- newImage, _ := os.Create(rotatedFile.Name())
- defer newImage.Close()
- err = png.Encode(newImage, dstImage)
- checkErr(err)
- diagramFilenamePNG = rotatedFile.Name()
- }
- } else {
- pdf.AddPage()
- }
- } else {
- pdf.Ln(10)
- }*/
- // embed in PDF
- var options gofpdf.ImageOptions
- options.ImageType = ""
- pdf.RegisterImage(diagramFilenamePNG, "")
- var maxWidth, maxHeight, newWidth int
- var embedWidth, embedHeight float64
- if allowedPdfLandscapePages && muchWiderThanHigh {
- maxWidth, maxHeight = 275, 150
- isLandscapePage = true
- pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4"))
- } else {
- pdf.Ln(10)
- maxWidth, maxHeight = 190, 200 // reduced height as a text paragraph is above
- }
- newWidth = srcDimensions.Dx() / (srcDimensions.Dy() / maxHeight)
- if newWidth <= maxWidth {
- embedWidth, embedHeight = 0, float64(maxHeight)
- } else {
- embedWidth, embedHeight = float64(maxWidth), 0
- }
- pdf.ImageOptions(diagramFilenamePNG, 10, pdf.GetY(), embedWidth, embedHeight, true, options, 0, "")
- isLandscapePage = false
-
- // add diagram legend page
- if embedDiagramLegendPage {
- pdf.AddPage()
- gofpdi.UseImportedTemplate(pdf, diagramLegendTemplateId, 0, 0, 0, 300)
- }
-}
-
-func embedDataRiskMapping(diagramFilenamePNG string) {
- pdf.SetTextColor(0, 0, 0)
- title := "Data Mapping"
- addHeadline(title, false)
- defineLinkTarget("{data-risk-mapping}")
- currentChapterTitleBreadcrumb = title
-
- var intro strings.Builder
- intro.WriteString("The following diagram was generated by Threagile based on the model input and gives a high-level " +
- "distribution of data assets across technical assets. The color matches the identified data breach probability and risk level " +
- "(see the \"Data Breach Probabilities\" chapter for more details). " +
- "A solid line stands for data is stored by the asset and a dashed one means " +
- "data is processed by the asset. For a full high-resolution version of this diagram please refer to the PNG image " +
- "file alongside this report.")
-
- html := pdf.HTMLBasicNew()
- html.Write(5, intro.String())
-
- // TODO dedupe with code from other diagram embedding (almost same code)
- // check to rotate the image if it is wider than high
- /* #nosec diagramFilenamePNG is not tainted */
- imagePath, _ := os.Open(diagramFilenamePNG)
- defer imagePath.Close()
- srcImage, _, _ := image.Decode(imagePath)
- srcDimensions := srcImage.Bounds()
- // wider than high?
- widerThanHigh := srcDimensions.Dx() > srcDimensions.Dy()
- pinnedWidth, pinnedHeight := 190.0, 195.0
- // fresh page (eventually landscape)?
- isLandscapePage = false
- /*
- if dataFlowDiagramFullscreen {
- pinnedHeight = 235.0
- if widerThanHigh {
- if allowedPdfLandscapePages {
- pinnedWidth = 275.0
- isLandscapePage = true
- pdf.AddPageFormat("L", pdf.GetPageSizeStr("A4"))
- } else {
- // so rotate the image left by 90 degrees
- // ok, use temp PNG then
- // now rotate left by 90 degrees
- rotatedFile, err := ioutil.TempFile(model.TempFolder, "diagram-*-.png")
- checkErr(err)
- defer os.Remove(rotatedFile.Name())
- dstImage := image.NewRGBA(image.Rect(0, 0, srcDimensions.Dy(), srcDimensions.Dx()))
- err = graphics.Rotate(dstImage, srcImage, &graphics.RotateOptions{-1 * math.Pi / 2.0})
- checkErr(err)
- newImage, _ := os.Create(rotatedFile.Name())
- defer newImage.Close()
- err = png.Encode(newImage, dstImage)
- checkErr(err)
- diagramFilenamePNG = rotatedFile.Name()
- }
- } else {
- pdf.AddPage()
- }
- } else {
- pdf.Ln(10)
- }
- */
- // embed in PDF
- pdf.Ln(10)
- var options gofpdf.ImageOptions
- options.ImageType = ""
- pdf.RegisterImage(diagramFilenamePNG, "")
- if widerThanHigh {
- pinnedHeight = 0
- } else {
- pinnedWidth = 0
- }
- pdf.ImageOptions(diagramFilenamePNG, 10, pdf.GetY(), pinnedWidth, pinnedHeight, true, options, 0, "")
- isLandscapePage = false
-}
-
-func writeReportToFile(reportFilename string) {
- err := pdf.OutputFileAndClose(reportFilename)
- checkErr(err)
-}
-
-func addHeadline(headline string, small bool) {
- pdf.AddPage()
- gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300)
- fontSize := fontSizeHeadline
- if small {
- fontSize = fontSizeHeadlineSmall
- }
- pdf.SetFont("Helvetica", "B", float64(fontSize))
- pdf.Text(11, 40, headline)
- pdf.SetFont("Helvetica", "", fontSizeBody)
- pdf.SetX(17)
- pdf.SetY(46)
-}
-
-func pageBreak() {
- pdf.SetDrawColor(0, 0, 0)
- pdf.SetDashPattern([]float64{}, 0)
- pdf.AddPage()
- gofpdi.UseImportedTemplate(pdf, contentTemplateId, 0, 0, 0, 300)
- pdf.SetX(17)
- pdf.SetY(20)
-}
-func pageBreakInLists() {
- pageBreak()
- pdf.SetLineWidth(0.25)
- pdf.SetDrawColor(160, 160, 160)
- pdf.SetDashPattern([]float64{0.5, 0.5}, 0)
-}
-
-func pdfColorDataAssets() {
- pdf.SetTextColor(18, 36, 111)
-}
-func rgbHexColorDataAssets() string {
- return "#12246F"
-}
-
-func pdfColorTechnicalAssets() {
- pdf.SetTextColor(18, 36, 111)
-}
-func rgbHexColorTechnicalAssets() string {
- return "#12246F"
-}
-
-func pdfColorTrustBoundaries() {
- pdf.SetTextColor(18, 36, 111)
-}
-func rgbHexColorTrustBoundaries() string {
- return "#12246F"
-}
-
-func pdfColorSharedRuntime() {
- pdf.SetTextColor(18, 36, 111)
-}
-func rgbHexColorSharedRuntime() string {
- return "#12246F"
-}
-
-func pdfColorRiskFindings() {
- pdf.SetTextColor(160, 40, 30)
-}
-func rgbHexColorRiskFindings() string {
- return "#A0281E"
-}
-
-func pdfColorDisclaimer() {
- pdf.SetTextColor(140, 140, 140)
-}
-func rgbHexColorDisclaimer() string {
- return "#8C8C8C"
-}
-
-func pdfColorOutOfScope() {
- pdf.SetTextColor(127, 127, 127)
-}
-func rgbHexColorOutOfScope() string {
- return "#7F7F7F"
-}
-
-func pdfColorGray() {
- pdf.SetTextColor(80, 80, 80)
-}
-func rgbHexColorGray() string {
- return "#505050"
-}
-
-func pdfColorLightGray() {
- pdf.SetTextColor(100, 100, 100)
-}
-func rgbHexColorLightGray() string {
- return "#646464"
-}
-
-func pdfColorBlack() {
- pdf.SetTextColor(0, 0, 0)
-}
-func rgbHexColorBlack() string {
- return "#000000"
-}
-
-func pdfColorRed() {
- pdf.SetTextColor(255, 0, 0)
-}
-func rgbHexColorRed() string {
- return "#FF0000"
-}
diff --git a/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go b/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go
deleted file mode 100644
index 54627414..00000000
--- a/risks/built-in/dos-risky-access-across-trust-boundary/dos-risky-access-across-trust-boundary-rule.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package dos_risky_access_across_trust_boundary
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "dos-risky-access-across-trust-boundary",
- Title: "DoS-risky Access Across Trust-Boundary",
- Description: "Assets accessed across trust boundaries with critical or mission-critical availability rating " +
- "are more prone to Denial-of-Service (DoS) risks.",
- Impact: "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.",
- ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html",
- Action: "Anti-DoS Measures",
- Mitigation: "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. " +
- "Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. " +
- "Generally applying redundancy on the targeted technical asset reduces the risk of DoS.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.DenialOfService,
- DetectionLogic: "In-scope technical assets (excluding " + model.LoadBalancer.String() + ") with " +
- "availability rating of " + model.Critical.String() + " or higher which have incoming data-flows across a " +
- "network trust-boundary (excluding " + model.DevOps.String() + " usage).",
- RiskAssessment: "Matching technical assets with availability rating " +
- "of " + model.Critical.String() + " or higher are " +
- "at " + model.LowSeverity.String() + " risk. When the availability rating is " +
- model.MissionCritical.String() + " and neither a VPN nor IP filter for the incoming data-flow nor redundancy " +
- "for the asset is applied, the risk-rating is considered " + model.MediumSeverity.String() + ".", // TODO reduce also, when data-flow authenticated and encrypted?
- FalsePositives: "When the accessed target operations are not time- or resource-consuming.",
- ModelFailurePossibleReason: false,
- CWE: 400,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && technicalAsset.Technology != model.LoadBalancer &&
- technicalAsset.Availability >= model.Critical {
- for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
- sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId]
- if sourceAsset.Technology.IsTrafficForwarding() {
- // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human
- callersCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[sourceAsset.Id]
- for _, callersCommLink := range callersCommLinks {
- risks = checkRisk(technicalAsset, callersCommLink, sourceAsset.Title, risks)
- }
- } else {
- risks = checkRisk(technicalAsset, incomingAccess, "", risks)
- }
- }
- }
- }
- return risks
-}
-
-func checkRisk(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink, hopBetween string, risks []model.Risk) []model.Risk {
- if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() &&
- !incomingAccess.Protocol.IsProcessLocal() && incomingAccess.Usage != model.DevOps {
- highRisk := technicalAsset.Availability == model.MissionCritical &&
- !incomingAccess.VPN && !incomingAccess.IpFiltered && !technicalAsset.Redundant
- risks = append(risks, createRisk(technicalAsset, incomingAccess, hopBetween,
- model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk))
- }
- return risks
-}
-
-func createRisk(techAsset model.TechnicalAsset, dataFlow model.CommunicationLink, hopBetween string,
- clientOutsideTrustBoundary model.TechnicalAsset, moreRisky bool) model.Risk {
- impact := model.LowImpact
- if moreRisky {
- impact = model.MediumImpact
- }
- if len(hopBetween) > 0 {
- hopBetween = " forwarded via " + hopBetween + ""
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: "Denial-of-Service risky access of " + techAsset.Title + " by " + clientOutsideTrustBoundary.Title +
- " via " + dataFlow.Title + "" + hopBetween,
- MostRelevantTechnicalAssetId: techAsset.Id,
- MostRelevantCommunicationLinkId: dataFlow.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{},
- }
- risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataFlow.Id
- return risk
-}
diff --git a/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go b/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go
deleted file mode 100644
index e491655d..00000000
--- a/risks/built-in/missing-authentication-second-factor/missing-authentication-second-factor-rule.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package missing_authentication_second_factor
-
-import (
- "github.com/threagile/threagile/model"
- "github.com/threagile/threagile/risks/built-in/missing-authentication"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-authentication-second-factor",
- Title: "Missing Two-Factor Authentication (2FA)",
- Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests with " +
- "two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.",
- Impact: "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.",
- ASVS: "V2 - Authentication Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html",
- Action: "Authentication with Second Factor (2FA)",
- Mitigation: "Apply an authentication method to the technical asset protecting highly sensitive data via " +
- "two-factor authentication for human users.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.BusinessSide,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope technical assets (except " + model.LoadBalancer.String() + ", " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", " + model.IDS.String() + ", and " + model.IPS.String() + ") should authenticate incoming requests via two-factor authentication (2FA) " +
- "when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.",
- RiskAssessment: model.MediumSeverity.String(),
- FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
- "can be considered as false positives after individual review.",
- ModelFailurePossibleReason: false,
- CWE: 308,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.OutOfScope ||
- technicalAsset.Technology.IsTrafficForwarding() ||
- technicalAsset.Technology.IsUnprotectedCommsTolerated() {
- continue
- }
- if technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical ||
- technicalAsset.HighestAvailability() >= model.Critical ||
- technicalAsset.MultiTenant {
- // check each incoming data flow
- commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- for _, commLink := range commLinks {
- caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId]
- if caller.Technology.IsUnprotectedCommsTolerated() || caller.Type == model.Datastore {
- continue
- }
- if caller.UsedAsClientByHuman {
- moreRisky := commLink.HighestConfidentiality() >= model.Confidential ||
- commLink.HighestIntegrity() >= model.Critical
- if moreRisky && commLink.Authentication != model.TwoFactor {
- risks = append(risks, missing_authentication.CreateRisk(technicalAsset, commLink, commLink, "", model.MediumImpact, model.Unlikely, true, Category()))
- }
- } else if caller.Technology.IsTrafficForwarding() {
- // Now try to walk a call chain up (1 hop only) to find a caller's caller used by human
- callersCommLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[caller.Id]
- for _, callersCommLink := range callersCommLinks {
- callersCaller := model.ParsedModelRoot.TechnicalAssets[callersCommLink.SourceId]
- if callersCaller.Technology.IsUnprotectedCommsTolerated() || callersCaller.Type == model.Datastore {
- continue
- }
- if callersCaller.UsedAsClientByHuman {
- moreRisky := callersCommLink.HighestConfidentiality() >= model.Confidential ||
- callersCommLink.HighestIntegrity() >= model.Critical
- if moreRisky && callersCommLink.Authentication != model.TwoFactor {
- risks = append(risks, missing_authentication.CreateRisk(technicalAsset, commLink, callersCommLink, caller.Title, model.MediumImpact, model.Unlikely, true, Category()))
- }
- }
- }
- }
- }
- }
- }
- return risks
-}
diff --git a/risks/built-in/missing-authentication/missing-authentication-rule.go b/risks/built-in/missing-authentication/missing-authentication-rule.go
deleted file mode 100644
index 9d002242..00000000
--- a/risks/built-in/missing-authentication/missing-authentication-rule.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package missing_authentication
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-authentication",
- Title: "Missing Authentication",
- Description: "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes or stores sensitive data. ",
- Impact: "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.",
- ASVS: "V2 - Authentication Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
- Action: "Authentication of Incoming Requests",
- Mitigation: "Apply an authentication method to the technical asset. To protect highly sensitive data consider " +
- "the use of two-factor authentication for human users.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope technical assets (except " + model.LoadBalancer.String() + ", " + model.ReverseProxy.String() + ", " + model.ServiceRegistry.String() + ", " + model.WAF.String() + ", " + model.IDS.String() + ", and " + model.IPS.String() + " and in-process calls) should authenticate incoming requests when the asset processes or stores " +
- "sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).",
- RiskAssessment: "The risk rating (medium or high) " +
- "depends on the sensitivity of the data sent across the communication link. Monitoring callers are exempted from this risk.",
- FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
- "can be considered as false positives after individual review.",
- ModelFailurePossibleReason: false,
- CWE: 306,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.OutOfScope || technicalAsset.Technology == model.LoadBalancer ||
- technicalAsset.Technology == model.ReverseProxy || technicalAsset.Technology == model.ServiceRegistry || technicalAsset.Technology == model.WAF || technicalAsset.Technology == model.IDS || technicalAsset.Technology == model.IPS {
- continue
- }
- if technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical ||
- technicalAsset.HighestAvailability() >= model.Critical ||
- technicalAsset.MultiTenant {
- // check each incoming data flow
- commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- for _, commLink := range commLinks {
- caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId]
- if caller.Technology.IsUnprotectedCommsTolerated() || caller.Type == model.Datastore {
- continue
- }
- highRisk := commLink.HighestConfidentiality() == model.StrictlyConfidential ||
- commLink.HighestIntegrity() == model.MissionCritical
- lowRisk := commLink.HighestConfidentiality() <= model.Internal &&
- commLink.HighestIntegrity() == model.Operational
- impact := model.MediumImpact
- if highRisk {
- impact = model.HighImpact
- } else if lowRisk {
- impact = model.LowImpact
- }
- if commLink.Authentication == model.NoneAuthentication && !commLink.Protocol.IsProcessLocal() {
- risks = append(risks, CreateRisk(technicalAsset, commLink, commLink, "", impact, model.Likely, false, Category()))
- }
- }
- }
- }
- return risks
-}
-
-func CreateRisk(technicalAsset model.TechnicalAsset, incomingAccess, incomingAccessOrigin model.CommunicationLink, hopBetween string,
- impact model.RiskExploitationImpact, likelihood model.RiskExploitationLikelihood, twoFactor bool, category model.RiskCategory) model.Risk {
- factorString := ""
- if twoFactor {
- factorString = "Two-Factor "
- }
- if len(hopBetween) > 0 {
- hopBetween = "forwarded via " + hopBetween + " "
- }
- risk := model.Risk{
- Category: category,
- Severity: model.CalculateSeverity(likelihood, impact),
- ExploitationLikelihood: likelihood,
- ExploitationImpact: impact,
- Title: "Missing " + factorString + "Authentication covering communication link " + incomingAccess.Title + " " +
- "from " + model.ParsedModelRoot.TechnicalAssets[incomingAccessOrigin.SourceId].Title + " " + hopBetween +
- "to " + technicalAsset.Title + "",
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- MostRelevantCommunicationLinkId: incomingAccess.Id,
- DataBreachProbability: model.Possible,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + incomingAccess.Id + "@" + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/missing-hardening/missing-hardening-rule.go b/risks/built-in/missing-hardening/missing-hardening-rule.go
deleted file mode 100644
index b4795740..00000000
--- a/risks/built-in/missing-hardening/missing-hardening-rule.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package missing_hardening
-
-import (
- "github.com/threagile/threagile/model"
- "strconv"
-)
-
-const raaLimit = 55
-const raaLimitReduced = 40
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-hardening",
- Title: "Missing Hardening",
- Description: "Technical assets with a Relative Attacker Attractiveness (RAA) value of " + strconv.Itoa(raaLimit) + " % or higher should be " +
- "explicitly hardened taking best practices and vendor hardening guides into account.",
- Impact: "If this risk remains unmitigated, attackers might be able to easier attack high-value targets.",
- ASVS: "V14 - Configuration Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
- Action: "System Hardening",
- Mitigation: "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor " +
- "recommendations, DevSec Hardening Framework, DBSAT for Oracle databases, and others).",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.Tampering,
- DetectionLogic: "In-scope technical assets with RAA values of " + strconv.Itoa(raaLimit) + " % or higher. " +
- "Generally for high-value targets like datastores, application servers, identity providers and ERP systems this limit is reduced to " + strconv.Itoa(raaLimitReduced) + " %",
- RiskAssessment: "The risk rating depends on the sensitivity of the data processed or stored in the technical asset.",
- FalsePositives: "Usually no false positives.",
- ModelFailurePossibleReason: false,
- CWE: 16,
- }
-}
-
-func SupportedTags() []string {
- return []string{"tomcat"}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope {
- if technicalAsset.RAA >= raaLimit || (technicalAsset.RAA >= raaLimitReduced &&
- (technicalAsset.Type == model.Datastore || technicalAsset.Technology == model.ApplicationServer || technicalAsset.Technology == model.IdentityProvider || technicalAsset.Technology == model.ERP)) {
- risks = append(risks, createRisk(technicalAsset))
- }
- }
- }
- return risks
-}
-
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
- title := "Missing Hardening risk at " + technicalAsset.Title + ""
- impact := model.LowImpact
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
- impact = model.MediumImpact
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Likely, impact),
- ExploitationLikelihood: model.Likely,
- ExploitationImpact: impact,
- Title: title,
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go b/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go
deleted file mode 100644
index 31f10700..00000000
--- a/risks/built-in/missing-identity-propagation/missing-identity-propagation-rule.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package missing_identity_propagation
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-identity-propagation",
- Title: "Missing Identity Propagation",
- Description: "Technical assets (especially multi-tenant systems), which usually process data for endusers should " +
- "authorize every request based on the identity of the enduser when the data flow is authenticated (i.e. non-public). " +
- "For DevOps usages at least a technical-user authorization is required.",
- Impact: "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within " +
- "the system due to missing resource-based authorization checks.",
- ASVS: "V4 - Access Control Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
- Action: "Identity Propagation and Resource-based Authorization",
- Mitigation: "When processing requests for endusers if possible authorize in the backend against the propagated " +
- "identity of the enduser. This can be achieved in passing JWTs or similar tokens and checking them in the backend " +
- "services. For DevOps usages apply at least a technical-user authorization.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope service-like technical assets which usually process data based on enduser requests, if authenticated " +
- "(i.e. non-public), should authorize incoming requests based on the propagated enduser identity when their rating is sensitive. " +
- "This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). " +
- "DevOps usages are exempted from this risk.",
- RiskAssessment: "The risk rating (medium or high) " +
- "depends on the confidentiality, integrity, and availability rating of the technical asset.",
- FalsePositives: "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) " +
- "can be considered as false positives after individual review.",
- ModelFailurePossibleReason: false,
- CWE: 284,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if technicalAsset.OutOfScope {
- continue
- }
- if technicalAsset.Technology.IsUsuallyProcessingEnduserRequests() &&
- (technicalAsset.Confidentiality >= model.Confidential ||
- technicalAsset.Integrity >= model.Critical ||
- technicalAsset.Availability >= model.Critical ||
- (technicalAsset.MultiTenant &&
- (technicalAsset.Confidentiality >= model.Restricted ||
- technicalAsset.Integrity >= model.Important ||
- technicalAsset.Availability >= model.Important))) {
- // check each incoming authenticated data flow
- commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- for _, commLink := range commLinks {
- caller := model.ParsedModelRoot.TechnicalAssets[commLink.SourceId]
- if !caller.Technology.IsUsuallyAbleToPropagateIdentityToOutgoingTargets() || caller.Type == model.Datastore {
- continue
- }
- if commLink.Authentication != model.NoneAuthentication &&
- commLink.Authorization != model.EnduserIdentityPropagation {
- if commLink.Usage == model.DevOps && commLink.Authorization != model.NoneAuthorization {
- continue
- }
- highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical ||
- technicalAsset.Availability == model.MissionCritical
- risks = append(risks, createRisk(technicalAsset, commLink, highRisk))
- }
- }
- }
- }
- return risks
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink, moreRisky bool) model.Risk {
- impact := model.LowImpact
- if moreRisky {
- impact = model.MediumImpact
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: "Missing Enduser Identity Propagation over communication link " + incomingAccess.Title + " " +
- "from " + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Title + " " +
- "to " + technicalAsset.Title + "",
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- MostRelevantCommunicationLinkId: incomingAccess.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + incomingAccess.Id + "@" + model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/missing-identity-store/missing-identity-store-rule.go b/risks/built-in/missing-identity-store/missing-identity-store-rule.go
deleted file mode 100644
index 9096e320..00000000
--- a/risks/built-in/missing-identity-store/missing-identity-store-rule.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package missing_identity_store
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-identity-store",
- Title: "Missing Identity Store",
- Description: "The modeled architecture does not contain an identity store, which might be the risk of a model missing " +
- "critical assets (and thus not seeing their risks).",
- Impact: "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store " +
- "that is currently missing in the model.",
- ASVS: "V2 - Authentication Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
- Action: "Identity Store",
- Mitigation: "Include an identity store in the model if the application has a login.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Spoofing,
- DetectionLogic: "Models with authenticated data-flows authorized via enduser-identity missing an in-scope identity store.",
- RiskAssessment: "The risk rating depends on the sensitivity of the enduser-identity authorized technical assets and " +
- "their data assets processed and stored.",
- FalsePositives: "Models only offering data/services without any real authentication need " +
- "can be considered as false positives after individual review.",
- ModelFailurePossibleReason: true,
- CWE: 287,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
- if !technicalAsset.OutOfScope &&
- (technicalAsset.Technology == model.IdentityStoreLDAP || technicalAsset.Technology == model.IdentityStoreDatabase) {
- // everything fine, no risk, as we have an in-scope identity store in the model
- return risks
- }
- }
- // now check if we have enduser-identity authorized communication links, then it's a risk
- riskIdentified := false
- var mostRelevantAsset model.TechnicalAsset
- impact := model.LowImpact
- for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- for _, commLink := range technicalAsset.CommunicationLinksSorted() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
- if commLink.Authorization == model.EnduserIdentityPropagation {
- riskIdentified = true
- targetAsset := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId]
- if impact == model.LowImpact {
- mostRelevantAsset = targetAsset
- if targetAsset.HighestConfidentiality() >= model.Confidential ||
- targetAsset.HighestIntegrity() >= model.Critical ||
- targetAsset.HighestAvailability() >= model.Critical {
- impact = model.MediumImpact
- }
- }
- if targetAsset.Confidentiality >= model.Confidential ||
- targetAsset.Integrity >= model.Critical ||
- targetAsset.Availability >= model.Critical {
- impact = model.MediumImpact
- }
- // just for referencing the most interesting asset
- if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
- mostRelevantAsset = technicalAsset
- }
- }
- }
- }
- if riskIdentified {
- risks = append(risks, createRisk(mostRelevantAsset, impact))
- }
- return risks
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk {
- title := "Missing Identity Store in the threat model (referencing asset " + technicalAsset.Title + " as an example)"
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: title,
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{},
- }
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go b/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go
deleted file mode 100644
index 495a3215..00000000
--- a/risks/built-in/missing-network-segmentation/missing-network-segmentation-rule.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package missing_network_segmentation
-
-import (
- "github.com/threagile/threagile/model"
- "sort"
-)
-
-const raaLimit = 50
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-network-segmentation",
- Title: "Missing Network Segmentation",
- Description: "Highly sensitive assets and/or datastores residing in the same network segment than other " +
- "lower sensitive assets (like webservers or content management systems etc.) should be better protected " +
- "by a network segmentation trust-boundary.",
- Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " +
- "more valuable targets, as they are not separated by network segmentation.",
- ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
- Action: "Network Segmentation",
- Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive assets and/or datastores.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope technical assets with high sensitivity and RAA values as well as datastores " +
- "when surrounded by assets (without a network trust-boundary in-between) which are of type " + model.ClientSystem.String() + ", " +
- model.WebServer.String() + ", " + model.WebApplication.String() + ", " + model.CMS.String() + ", " + model.WebServiceREST.String() + ", " + model.WebServiceSOAP.String() + ", " +
- model.BuildPipeline.String() + ", " + model.SourcecodeRepository.String() + ", " + model.Monitoring.String() + ", or similar and there is no direct connection between these " +
- "(hence no requirement to be so close to each other).",
- RiskAssessment: "Default is " + model.LowSeverity.String() + " risk. The risk is increased to " + model.MediumSeverity.String() + " when the asset missing the " +
- "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".",
- FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " +
- "containing/processing highly sensitive data.",
- ModelFailurePossibleReason: false,
- CWE: 1008,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- // first create them in memory (see the link replacement below for nested trust boundaries) - otherwise in Go ranging over map is random order
- // range over them in sorted (hence re-producible) way:
- keys := make([]string, 0)
- for k, _ := range model.ParsedModelRoot.TechnicalAssets {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, key := range keys {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[key]
- if !technicalAsset.OutOfScope && technicalAsset.Technology != model.ReverseProxy && technicalAsset.Technology != model.WAF && technicalAsset.Technology != model.IDS && technicalAsset.Technology != model.IPS && technicalAsset.Technology != model.ServiceRegistry {
- if technicalAsset.RAA >= raaLimit && (technicalAsset.Type == model.Datastore || technicalAsset.Confidentiality >= model.Confidential ||
- technicalAsset.Integrity >= model.Critical || technicalAsset.Availability >= model.Critical) {
- // now check for any other same-network assets of certain types which have no direct connection
- for _, sparringAssetCandidateId := range keys { // so inner loop again over all assets
- if technicalAsset.Id != sparringAssetCandidateId {
- sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId]
- if sparringAssetCandidate.Technology.IsLessProtectedType() &&
- technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) &&
- !technicalAsset.HasDirectConnection(sparringAssetCandidateId) &&
- !sparringAssetCandidate.Technology.IsCloseToHighValueTargetsTolerated() {
- highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical || technicalAsset.Availability == model.MissionCritical
- risks = append(risks, createRisk(technicalAsset, highRisk))
- break
- }
- }
- }
- }
- }
- }
- return risks
-}
-
-func createRisk(techAsset model.TechnicalAsset, moreRisky bool) model.Risk {
- impact := model.LowImpact
- if moreRisky {
- impact = model.MediumImpact
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: "Missing Network Segmentation to further encapsulate and protect " + techAsset.Title + " against unrelated " +
- "lower protected assets in the same network segment, which might be easier to compromise by attackers",
- MostRelevantTechnicalAssetId: techAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{techAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id
- return risk
-}
diff --git a/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go b/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go
deleted file mode 100644
index 8ef10cdf..00000000
--- a/risks/built-in/missing-vault-isolation/missing-vault-isolation-rule.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package missing_vault_isolation
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "missing-vault-isolation",
- Title: "Missing Vault Isolation",
- Description: "Highly sensitive vault assets and their datastores should be isolated from other assets " +
- "by their own network segmentation trust-boundary (" + model.ExecutionEnvironment.String() + " boundaries do not count as network isolation).",
- Impact: "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards " +
- "highly sensitive vault assets and their datastores, as they are not separated by network segmentation.",
- ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
- Action: "Network Segmentation",
- Mitigation: "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their datastores.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope vault assets " +
- "when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). " +
- "This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).",
- RiskAssessment: "Default is " + model.MediumImpact.String() + " impact. The impact is increased to " + model.HighImpact.String() + " when the asset missing the " +
- "trust-boundary protection is rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + ".",
- FalsePositives: "When all assets within the network segmentation trust-boundary are hardened and protected to the same extend as if all were " +
- "vaults with data of highest sensitivity.",
- ModelFailurePossibleReason: false,
- CWE: 1008,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
- if !technicalAsset.OutOfScope && technicalAsset.Technology == model.Vault {
- moreImpact := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical ||
- technicalAsset.Availability == model.MissionCritical
- sameExecutionEnv := false
- createRiskEntry := false
- // now check for any other same-network assets of non-vault-related types
- for sparringAssetCandidateId, _ := range model.ParsedModelRoot.TechnicalAssets { // so inner loop again over all assets
- if technicalAsset.Id != sparringAssetCandidateId {
- sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId]
- if sparringAssetCandidate.Technology != model.Vault && !isVaultStorage(technicalAsset, sparringAssetCandidate) {
- if technicalAsset.IsSameExecutionEnvironment(sparringAssetCandidateId) {
- createRiskEntry = true
- sameExecutionEnv = true
- } else if technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) {
- createRiskEntry = true
- }
- }
- }
- }
- if createRiskEntry {
- risks = append(risks, createRisk(technicalAsset, moreImpact, sameExecutionEnv))
- }
- }
- }
- return risks
-}
-
-func isVaultStorage(vault model.TechnicalAsset, storage model.TechnicalAsset) bool {
- return storage.Type == model.Datastore && vault.HasDirectConnection(storage.Id)
-}
-
-func createRisk(techAsset model.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) model.Risk {
- impact := model.MediumImpact
- likelihood := model.Unlikely
- others := "in the same network segment"
- if moreImpact {
- impact = model.HighImpact
- }
- if sameExecutionEnv {
- likelihood = model.Likely
- others = "in the same execution environment"
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(likelihood, impact),
- ExploitationLikelihood: likelihood,
- ExploitationImpact: impact,
- Title: "Missing Vault Isolation to further encapsulate and protect vault-related asset " + techAsset.Title + " against unrelated " +
- "lower protected assets " + others + ", which might be easier to compromise by attackers",
- MostRelevantTechnicalAssetId: techAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{techAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id
- return risk
-}
diff --git a/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go b/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go
deleted file mode 100644
index 5cee3a3e..00000000
--- a/risks/built-in/service-registry-poisoning/service-registry-poisoning-rule.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package service_registry_poisoning
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "service-registry-poisoning",
- Title: "Service Registry Poisoning",
- Description: "When a service registry used for discovery of trusted service endpoints Service Registry Poisoning risks might arise.",
- Impact: "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or " +
- "malicious lookup and config data leading to breach of sensitive data.",
- ASVS: "V10 - Malicious Code Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
- Action: "Service Registry Integrity Check",
- Mitigation: "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.Spoofing,
- DetectionLogic: "In-scope service registries.",
- RiskAssessment: "The risk rating depends on the sensitivity of the technical assets accessing the service registry " +
- "as well as the data assets processed or stored.",
- FalsePositives: "Service registries not used for service discovery " +
- "can be considered as false positives after individual review.",
- ModelFailurePossibleReason: false,
- CWE: 693,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && technicalAsset.Technology == model.ServiceRegistry {
- incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- risks = append(risks, createRisk(technicalAsset, incomingFlows))
- }
- }
- return risks
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, incomingFlows []model.CommunicationLink) model.Risk {
- title := "Service Registry Poisoning risk at " + technicalAsset.Title + ""
- impact := model.LowImpact
-
- for _, incomingFlow := range incomingFlows {
- caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
- if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical || technicalAsset.HighestAvailability() == model.MissionCritical ||
- caller.HighestConfidentiality() == model.StrictlyConfidential || caller.HighestIntegrity() == model.MissionCritical || caller.HighestAvailability() == model.MissionCritical ||
- incomingFlow.HighestConfidentiality() == model.StrictlyConfidential || incomingFlow.HighestIntegrity() == model.MissionCritical || incomingFlow.HighestAvailability() == model.MissionCritical {
- impact = model.MediumImpact
- break
- }
- }
-
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: title,
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id}, // TODO: find all service-lookup-using tech assets, which then might use spoofed lookups?
- }
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go b/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go
deleted file mode 100644
index cdf23c6b..00000000
--- a/risks/built-in/unencrypted-asset/unencrypted-asset-rule.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package unencrypted_asset
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "unencrypted-asset",
- Title: "Unencrypted Technical Assets",
- Description: "Due to the confidentiality rating of the technical asset itself and/or the processed data assets " +
- "this technical asset must be encrypted. The risk rating depends on the sensitivity technical asset itself and of the data assets stored.",
- Impact: "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.",
- ASVS: "V6 - Stored Cryptography Verification Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html",
- Action: "Encryption of Technical Asset",
- Mitigation: "Apply encryption to the technical asset.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Operations,
- STRIDE: model.InformationDisclosure,
- DetectionLogic: "In-scope unencrypted technical assets (excluding " + model.ReverseProxy.String() +
- ", " + model.LoadBalancer.String() + ", " + model.WAF.String() + ", " + model.IDS.String() +
- ", " + model.IPS.String() + " and embedded components like " + model.Library.String() + ") " +
- "storing data assets rated at least as " + model.Confidential.String() + " or " + model.Critical.String() + ". " +
- "For technical assets storing data assets rated as " + model.StrictlyConfidential.String() + " or " + model.MissionCritical.String() + " the " +
- "encryption must be of type " + model.DataWithEnduserIndividualKey.String() + ".",
- RiskAssessment: "Depending on the confidentiality rating of the stored data-assets either medium or high risk.",
- FalsePositives: "When all sensitive data stored within the asset is already fully encrypted on document or data level.",
- ModelFailurePossibleReason: false,
- CWE: 311,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-// check for technical assets that should be encrypted due to their confidentiality
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && !IsEncryptionWaiver(technicalAsset) &&
- (technicalAsset.HighestConfidentiality() >= model.Confidential ||
- technicalAsset.HighestIntegrity() >= model.Critical) {
- verySensitive := technicalAsset.HighestConfidentiality() == model.StrictlyConfidential ||
- technicalAsset.HighestIntegrity() == model.MissionCritical
- requiresEnduserKey := verySensitive && technicalAsset.Technology.IsUsuallyStoringEnduserData()
- if technicalAsset.Encryption == model.NoneEncryption {
- impact := model.MediumImpact
- if verySensitive {
- impact = model.HighImpact
- }
- risks = append(risks, createRisk(technicalAsset, impact, requiresEnduserKey))
- } else if requiresEnduserKey &&
- (technicalAsset.Encryption == model.Transparent || technicalAsset.Encryption == model.DataWithSymmetricSharedKey || technicalAsset.Encryption == model.DataWithAsymmetricSharedKey) {
- risks = append(risks, createRisk(technicalAsset, model.MediumImpact, requiresEnduserKey))
- }
- }
- }
- return risks
-}
-
-// Simple routing assets like 'Reverse Proxy' or 'Load Balancer' usually don't have their own storage and thus have no
-// encryption requirement for the asset itself (though for the communication, but that's a different rule)
-func IsEncryptionWaiver(asset model.TechnicalAsset) bool {
- return asset.Technology == model.ReverseProxy || asset.Technology == model.LoadBalancer ||
- asset.Technology == model.WAF || asset.Technology == model.IDS || asset.Technology == model.IPS ||
- asset.Technology.IsEmbeddedComponent()
-}
-
-func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact, requiresEnduserKey bool) model.Risk {
- title := "Unencrypted Technical Asset named " + technicalAsset.Title + ""
- if requiresEnduserKey {
- title += " missing enduser-individual encryption with " + model.DataWithEnduserIndividualKey.String()
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Unlikely, impact),
- ExploitationLikelihood: model.Unlikely,
- ExploitationImpact: impact,
- Title: title,
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go b/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go
deleted file mode 100644
index a6e00bec..00000000
--- a/risks/built-in/unguarded-access-from-internet/unguarded-access-from-internet-rule.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package unguarded_access_from_internet
-
-import (
- "github.com/threagile/threagile/model"
- "sort"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "unguarded-access-from-internet",
- Title: "Unguarded Access From Internet",
- Description: "Internet-exposed assets must be guarded by a protecting service, application, " +
- "or reverse-proxy.",
- Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive systems without any hardening components in-between " +
- "due to them being directly exposed on the internet.",
- ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
- Action: "Encapsulation of Technical Asset",
- Mitigation: "Encapsulate the asset behind a guarding service, application, or reverse-proxy. " +
- "For admin maintenance a bastion-host should be used as a jump-server. " +
- "For file transfer a store-and-forward-host should be used as an indirect file exchange platform.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope technical assets (excluding " + model.LoadBalancer.String() + ") with confidentiality rating " +
- "of " + model.Confidential.String() + " (or higher) or with integrity rating of " + model.Critical.String() + " (or higher) when " +
- "accessed directly from the internet. All " +
- model.WebServer.String() + ", " + model.WebApplication.String() + ", " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", and " + model.Gateway.String() + " assets are exempted from this risk when " +
- "they do not consist of custom developed code and " +
- "the data-flow only consists of HTTP or FTP protocols. Access from " + model.Monitoring.String() + " systems " +
- "as well as VPN-protected connections are exempted.",
- RiskAssessment: "The matching technical assets are at " + model.LowSeverity.String() + " risk. When either the " +
- "confidentiality rating is " + model.StrictlyConfidential.String() + " or the integrity rating " +
- "is " + model.MissionCritical.String() + ", the risk-rating is considered " + model.MediumSeverity.String() + ". " +
- "For assets with RAA values higher than 40 % the risk-rating increases.",
- FalsePositives: "When other means of filtering client requests are applied equivalent of " + model.ReverseProxy.String() + ", " + model.WAF.String() + ", or " + model.Gateway.String() + " components.",
- ModelFailurePossibleReason: false,
- CWE: 501,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope {
- commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
- sort.Sort(model.ByTechnicalCommunicationLinkIdSort(commLinks))
- for _, incomingAccess := range commLinks {
- if technicalAsset.Technology != model.LoadBalancer {
- if !technicalAsset.CustomDevelopedParts {
- if (technicalAsset.Technology == model.WebServer || technicalAsset.Technology == model.WebApplication || technicalAsset.Technology == model.ReverseProxy || technicalAsset.Technology == model.WAF || technicalAsset.Technology == model.Gateway) &&
- (incomingAccess.Protocol == model.HTTP || incomingAccess.Protocol == model.HTTPS) {
- continue
- }
- if technicalAsset.Technology == model.Gateway &&
- (incomingAccess.Protocol == model.FTP || incomingAccess.Protocol == model.FTPS || incomingAccess.Protocol == model.SFTP) {
- continue
- }
- }
- if model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId].Technology == model.Monitoring ||
- incomingAccess.VPN {
- continue
- }
- if technicalAsset.Confidentiality >= model.Confidential || technicalAsset.Integrity >= model.Critical {
- sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId]
- if sourceAsset.Internet {
- highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical
- risks = append(risks, createRisk(technicalAsset, incomingAccess,
- model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk))
- }
- }
- }
- }
- }
- }
- return risks
-}
-
-func createRisk(dataStore model.TechnicalAsset, dataFlow model.CommunicationLink,
- clientFromInternet model.TechnicalAsset, moreRisky bool) model.Risk {
- impact := model.LowImpact
- if moreRisky || dataStore.RAA > 40 {
- impact = model.MediumImpact
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.VeryLikely, impact),
- ExploitationLikelihood: model.VeryLikely,
- ExploitationImpact: impact,
- Title: "Unguarded Access from Internet of " + dataStore.Title + " by " +
- clientFromInternet.Title + "" + " via " + dataFlow.Title + "",
- MostRelevantTechnicalAssetId: dataStore.Id,
- MostRelevantCommunicationLinkId: dataFlow.Id,
- DataBreachProbability: model.Possible,
- DataBreachTechnicalAssetIDs: []string{dataStore.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + dataStore.Id + "@" + clientFromInternet.Id + "@" + dataFlow.Id
- return risk
-}
diff --git a/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go b/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go
deleted file mode 100644
index 5fa8f9e2..00000000
--- a/risks/built-in/unguarded-direct-datastore-access/unguarded-direct-datastore-access-rule.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package unguarded_direct_datastore_access
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-func Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "unguarded-direct-datastore-access",
- Title: "Unguarded Direct Datastore Access",
- Description: "Datastores accessed across trust boundaries must be guarded by some protecting service or application.",
- Impact: "If this risk is unmitigated, attackers might be able to directly attack sensitive datastores without any protecting components in-between.",
- ASVS: "V1 - Architecture, Design and Threat Modeling Requirements",
- CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
- Action: "Encapsulation of Datastore",
- Mitigation: "Encapsulate the datastore access behind a guarding service or application.",
- Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
- Function: model.Architecture,
- STRIDE: model.ElevationOfPrivilege,
- DetectionLogic: "In-scope technical assets of type " + model.Datastore.String() + " (except " + model.IdentityStoreLDAP.String() + " when accessed from " + model.IdentityProvider.String() + " and " + model.FileServer.String() + " when accessed via file transfer protocols) with confidentiality rating " +
- "of " + model.Confidential.String() + " (or higher) or with integrity rating of " + model.Critical.String() + " (or higher) " +
- "which have incoming data-flows from assets outside across a network trust-boundary. DevOps config and deployment access is excluded from this risk.", // TODO new rule "missing bastion host"?
- RiskAssessment: "The matching technical assets are at " + model.LowSeverity.String() + " risk. When either the " +
- "confidentiality rating is " + model.StrictlyConfidential.String() + " or the integrity rating " +
- "is " + model.MissionCritical.String() + ", the risk-rating is considered " + model.MediumSeverity.String() + ". " +
- "For assets with RAA values higher than 40 % the risk-rating increases.",
- FalsePositives: "When the caller is considered fully trusted as if it was part of the datastore itself.",
- ModelFailurePossibleReason: false,
- CWE: 501,
- }
-}
-
-func SupportedTags() []string {
- return []string{}
-}
-
-// check for datastores that should not be accessed directly across trust boundaries
-func GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, id := range model.SortedTechnicalAssetIDs() {
- technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
- if !technicalAsset.OutOfScope && technicalAsset.Type == model.Datastore {
- for _, incomingAccess := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id] {
- sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId]
- if (technicalAsset.Technology == model.IdentityStoreLDAP || technicalAsset.Technology == model.IdentityStoreDatabase) &&
- sourceAsset.Technology == model.IdentityProvider {
- continue
- }
- if technicalAsset.Confidentiality >= model.Confidential || technicalAsset.Integrity >= model.Critical {
- if incomingAccess.IsAcrossTrustBoundaryNetworkOnly() && !FileServerAccessViaFTP(technicalAsset, incomingAccess) &&
- incomingAccess.Usage != model.DevOps && !model.IsSharingSameParentTrustBoundary(technicalAsset, sourceAsset) {
- highRisk := technicalAsset.Confidentiality == model.StrictlyConfidential ||
- technicalAsset.Integrity == model.MissionCritical
- risks = append(risks, createRisk(technicalAsset, incomingAccess,
- model.ParsedModelRoot.TechnicalAssets[incomingAccess.SourceId], highRisk))
- }
- }
- }
- }
- }
- return risks
-}
-
-func FileServerAccessViaFTP(technicalAsset model.TechnicalAsset, incomingAccess model.CommunicationLink) bool {
- return technicalAsset.Technology == model.FileServer &&
- (incomingAccess.Protocol == model.FTP || incomingAccess.Protocol == model.FTPS || incomingAccess.Protocol == model.SFTP)
-}
-
-func createRisk(dataStore model.TechnicalAsset, dataFlow model.CommunicationLink, clientOutsideTrustBoundary model.TechnicalAsset, moreRisky bool) model.Risk {
- impact := model.LowImpact
- if moreRisky || dataStore.RAA > 40 {
- impact = model.MediumImpact
- }
- risk := model.Risk{
- Category: Category(),
- Severity: model.CalculateSeverity(model.Likely, impact),
- ExploitationLikelihood: model.Likely,
- ExploitationImpact: impact,
- Title: "Unguarded Direct Datastore Access of " + dataStore.Title + " by " +
- clientOutsideTrustBoundary.Title + " via " + dataFlow.Title + "",
- MostRelevantTechnicalAssetId: dataStore.Id,
- MostRelevantCommunicationLinkId: dataFlow.Id,
- DataBreachProbability: model.Improbable,
- DataBreachTechnicalAssetIDs: []string{dataStore.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + dataFlow.Id + "@" + clientOutsideTrustBoundary.Id + "@" + dataStore.Id
- return risk
-}
diff --git a/risks/custom/demo/demo-rule.go b/risks/custom/demo/demo-rule.go
deleted file mode 100644
index 5eb8d672..00000000
--- a/risks/custom/demo/demo-rule.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package main
-
-import (
- "github.com/threagile/threagile/model"
-)
-
-type customRiskRule string
-
-// exported as symbol (here simply as variable to interface to bundle many functions under one symbol) named "CustomRiskRule"
-var CustomRiskRule customRiskRule
-
-func (r customRiskRule) Category() model.RiskCategory {
- return model.RiskCategory{
- Id: "demo",
- Title: "Just a Demo",
- Description: "Demo Description",
- Impact: "Demo Impact",
- ASVS: "Demo ASVS",
- CheatSheet: "https://example.com",
- Action: "Demo Action",
- Mitigation: "Demo Mitigation",
- Check: "Demo Check",
- Function: model.Development,
- STRIDE: model.Tampering,
- DetectionLogic: "Demo Detection",
- RiskAssessment: "Demo Risk Assessment",
- FalsePositives: "Demo False Positive.",
- ModelFailurePossibleReason: false,
- CWE: 0,
- }
-}
-
-func (r customRiskRule) SupportedTags() []string {
- return []string{"demo tag"}
-}
-
-func (r customRiskRule) GenerateRisks() []model.Risk {
- risks := make([]model.Risk, 0)
- for _, techAsset := range model.ParsedModelRoot.TechnicalAssets {
- risks = append(risks, createRisk(techAsset))
- }
- return risks
-}
-
-func createRisk(technicalAsset model.TechnicalAsset) model.Risk {
- risk := model.Risk{
- Category: CustomRiskRule.Category(),
- Severity: model.CalculateSeverity(model.VeryLikely, model.MediumImpact),
- ExploitationLikelihood: model.VeryLikely,
- ExploitationImpact: model.MediumImpact,
- Title: "Demo risk at " + technicalAsset.Title + "",
- MostRelevantTechnicalAssetId: technicalAsset.Id,
- DataBreachProbability: model.Possible,
- DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
- }
- risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
- return risk
-}
diff --git a/support/render-data-asset-diagram.sh b/support/render-data-asset-diagram.sh
deleted file mode 100755
index 0cd54739..00000000
--- a/support/render-data-asset-diagram.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-dot -Tpng $1 -o $2
\ No newline at end of file
diff --git a/support/render-data-flow-diagram.sh b/support/render-data-flow-diagram.sh
deleted file mode 100755
index 0cd54739..00000000
--- a/support/render-data-flow-diagram.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-dot -Tpng $1 -o $2
\ No newline at end of file
diff --git a/support/schema.json b/support/schema.json
index b2e5eb5e..bbbafcb2 100644
--- a/support/schema.json
+++ b/support/schema.json
@@ -32,6 +32,13 @@
"null"
]
},
+ "contact": {
+ "description": "Author contact info",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
"homepage": {
"description": "Author homepage",
"type": [
@@ -44,6 +51,43 @@
"name"
]
},
+ "contributors": {
+ "description": "Contributors to the model",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Contributor name",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
+ "contact": {
+ "description": "Contributor contact info",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
+ "homepage": {
+ "description": "Contributor homepage",
+ "type": [
+ "string",
+ "null"
+ ]
+ }
+ },
+ "required": [
+ "name"
+ ]
+ }
+ },
"management_summary_comment": {
"description": "Individual management summary for the report",
"type": [
@@ -62,6 +106,27 @@
"mission-critical"
]
},
+ "application_description": {
+ "description": "General description of the application, its purpose and functionality.",
+ "type": "object",
+ "properties": {
+ "description": {
+ "description": "Application description for the report",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
+ "images": {
+ "description": "Application images for the report",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true
+ }
+ }
+ },
"business_overview": {
"description": "Individual business overview for the report",
"type": "object",
@@ -470,7 +535,7 @@
"type": "boolean"
},
"data_assets_processed": {
- "description": "Data assets processed",
+ "description": "Data assets processed; ; all data assets stored or sent or received via a communication link (be it as a source or a target) are implicitly also processed and do not need to be listed here.",
"type": [
"array",
"null"
@@ -624,7 +689,7 @@
"description": "VPN",
"type": "boolean"
},
- "ip_filtered": {
+ "ip_filtered": {
"description": "IP filtered",
"type": "boolean"
},
@@ -680,9 +745,7 @@
"vpn",
"ip_filtered",
"readonly",
- "usage",
- "data_assets_sent",
- "data_assets_received"
+ "usage"
]
}
}
diff --git a/test/abuse_cases.yaml b/test/abuse_cases.yaml
new file mode 100644
index 00000000..6315745b
--- /dev/null
+++ b/test/abuse_cases.yaml
@@ -0,0 +1,30 @@
+abuse_cases:
+ Denial-of-Service: >
+ As a hacker I want to disturb the functionality of the backend system in order to cause indirect
+ financial damage via unusable features.
+ CPU-Cycle Theft: >
+ As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners.
+ Ransomware: >
+ As a hacker I want to encrypt the storage and file systems in order to demand ransom.
+ Identity Theft: >
+ As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside.
+ PII Theft: >
+ As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage
+ their repudiation by publishing them.
+
+ ERP-System Compromise: >
+ As a hacker I want to access the ERP-System in order to steal/modify sensitive business data.
+ Database Compromise: >
+ As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive
+ business data.
+ Contract Filesystem Compromise: >
+ As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data.
+ Cross-Site Scripting Attacks: >
+ As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and
+ cause reputational damage.
+ Denial-of-Service of Enduser Functionality: >
+ As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial
+ damage (lower sales).
+ Denial-of-Service of ERP/DB Functionality: >
+ As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect
+ financial damage via unusable internal ERP features (not related to customer portal).
diff --git a/test/all.json b/test/all.json
new file mode 100644
index 00000000..42123fa5
--- /dev/null
+++ b/test/all.json
@@ -0,0 +1,2807 @@
+{
+ "author": {
+ "name": "John Doe",
+ "homepage": "www.example.com"
+ },
+ "title": "Some Example Application",
+ "date": "2020-07-01T00:00:00Z",
+ "management_summary_comment": "Just some \u003cb\u003emore\u003c/b\u003e custom summary possible here...\n",
+ "business_overview": {
+ "description": "Some more \u003ci\u003edemo text\u003c/i\u003e here and even images..."
+ },
+ "technical_overview": {
+ "description": "Some more \u003ci\u003edemo text\u003c/i\u003e here and even images..."
+ },
+ "business_criticality": "important",
+ "security_requirements": {
+ "EU-DSGVO": "Mandatory EU-Datenschutzgrundverordnung",
+ "Input Validation": "Strict input validation is required to reduce the overall attack surface.",
+ "Securing Administrative Access": "Administrative access must be secured with strong encryption and multi-factor authentication."
+ },
+ "questions": {
+ "How are the admin clients managed/protected against compromise?": "",
+ "How are the build pipeline components managed/protected against compromise?": "Managed by XYZ\n",
+ "How are the development clients managed/protected against compromise?": "Managed by XYZ\n"
+ },
+ "abuse_cases": {
+ "CPU-Cycle Theft": "As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners.\n",
+ "Contract Filesystem Compromise": "As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data.\n",
+ "Cross-Site Scripting Attacks": "As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and cause reputational damage.\n",
+ "Database Compromise": "As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive business data.\n",
+ "Denial-of-Service": "As a hacker I want to disturb the functionality of the backend system in order to cause indirect financial damage via unusable features.\n",
+ "Denial-of-Service of ERP/DB Functionality": "As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect financial damage via unusable internal ERP features (not related to customer portal).\n",
+ "Denial-of-Service of Enduser Functionality": "As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial damage (lower sales).\n",
+ "ERP-System Compromise": "As a hacker I want to access the ERP-System in order to steal/modify sensitive business data.\n",
+ "Identity Theft": "As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside.\n",
+ "PII Theft": "As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage their repudiation by publishing them.\n",
+ "Ransomware": "As a hacker I want to encrypt the storage and file systems in order to demand ransom.\n"
+ },
+ "tags_available": [
+ "linux",
+ "apache",
+ "mysql",
+ "jboss",
+ "keycloak",
+ "jenkins",
+ "git",
+ "oracle",
+ "some-erp",
+ "vmware",
+ "aws",
+ "aws:ec2",
+ "aws:s3"
+ ],
+ "data_assets": {
+ "build-job-config": {
+ "id": "build-job-config",
+ "title": "Build Job Config",
+ "description": "Data for customizing of the build job system.",
+ "usage": "devops",
+ "origin": "Company XYZ",
+ "owner": "Company XYZ",
+ "confidentiality": "restricted",
+ "integrity": "critical",
+ "availability": "operational",
+ "justification_cia_rating": "Data for customizing of the build job system.\n"
+ },
+ "client-application-code": {
+ "id": "client-application-code",
+ "title": "Client Application Code",
+ "description": "Angular and other client-side code delivered by the application.",
+ "usage": "devops",
+ "origin": "Company ABC",
+ "owner": "Company ABC",
+ "integrity": "critical",
+ "availability": "important",
+ "justification_cia_rating": "The integrity of the public data is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n"
+ },
+ "contract-summaries": {
+ "id": "contract-summaries",
+ "title": "Customer Contract Summaries",
+ "description": "Customer Contract Summaries",
+ "origin": "Customer",
+ "owner": "Company XYZ",
+ "confidentiality": "restricted",
+ "integrity": "operational",
+ "availability": "operational",
+ "justification_cia_rating": "Just some summaries.\n"
+ },
+ "customer-accounts": {
+ "id": "customer-accounts",
+ "title": "Customer Accounts",
+ "description": "Customer Accounts (including transient credentials when entered for checking them)",
+ "origin": "Customer",
+ "owner": "Company XYZ",
+ "quantity": "many",
+ "confidentiality": "strictly-confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "Customer account data for using the portal are required to be available to offer the portal functionality.\n"
+ },
+ "customer-contracts": {
+ "id": "customer-contracts",
+ "title": "Customer Contracts",
+ "description": "Customer Contracts (PDF)",
+ "origin": "Customer",
+ "owner": "Company XYZ",
+ "quantity": "many",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "operational",
+ "justification_cia_rating": "Contract data might contain financial data as well as personally identifiable information (PII). The integrity and availability of contract data is required for clearing payment disputes.\n"
+ },
+ "customer-operational-data": {
+ "id": "customer-operational-data",
+ "title": "Customer Operational Data",
+ "description": "Customer Operational Data",
+ "origin": "Customer",
+ "owner": "Company XYZ",
+ "quantity": "many",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "Customer operational data for using the portal are required to be available to offer the portal functionality and are used in the backend transactions.\n"
+ },
+ "db-dumps": {
+ "id": "db-dumps",
+ "title": "Database Customizing and Dumps",
+ "description": "Data for customizing of the DB system, which might include full database dumps.",
+ "usage": "devops",
+ "tags": [
+ "oracle"
+ ],
+ "origin": "Company XYZ",
+ "owner": "Company XYZ",
+ "confidentiality": "strictly-confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "Data for customizing of the DB system, which might include full database dumps.\n"
+ },
+ "erp-customizing": {
+ "id": "erp-customizing",
+ "title": "ERP Customizing Data",
+ "description": "Data for customizing of the ERP system.",
+ "usage": "devops",
+ "origin": "Company XYZ",
+ "owner": "Company XYZ",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "Data for customizing of the ERP system.\n"
+ },
+ "erp-logs": {
+ "id": "erp-logs",
+ "title": "ERP Logs",
+ "description": "Logs generated by the ERP system.",
+ "usage": "devops",
+ "origin": "Company XYZ",
+ "owner": "Company XYZ",
+ "quantity": "many",
+ "confidentiality": "restricted",
+ "justification_cia_rating": "Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard transactional logs.\n"
+ },
+ "internal-business-data": {
+ "id": "internal-business-data",
+ "title": "Some Internal Business Data",
+ "description": "Internal business data of the ERP system used unrelated to the customer-facing processes.",
+ "origin": "Company XYZ",
+ "owner": "Company XYZ",
+ "quantity": "few",
+ "confidentiality": "strictly-confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for internal non-customer-portal-related stuff).\n"
+ },
+ "marketing-material": {
+ "id": "marketing-material",
+ "title": "Marketing Material",
+ "description": "Website and marketing data to inform potential customers and generate new leads.",
+ "usage": "devops",
+ "origin": "Company ABC",
+ "owner": "Company ABC",
+ "integrity": "important",
+ "availability": "important",
+ "justification_cia_rating": "The integrity of the public data is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n"
+ },
+ "server-application-code": {
+ "id": "server-application-code",
+ "title": "Server Application Code",
+ "description": "API and other server-side code of the application.",
+ "usage": "devops",
+ "origin": "Company ABC",
+ "owner": "Company ABC",
+ "confidentiality": "internal",
+ "integrity": "mission-critical",
+ "availability": "important",
+ "justification_cia_rating": "The integrity of the API code is critical to avoid reputational damage and the availability is important on the long-term scale (but not critical) to keep the growth rate of the customer base steady.\n"
+ }
+ },
+ "technical_assets": {
+ "apache-webserver": {
+ "id": "apache-webserver",
+ "title": "Apache Webserver",
+ "description": "Apache Webserver hosting the API code and client-side code",
+ "type": "process",
+ "size": "application",
+ "technology": "web-server",
+ "machine": "container",
+ "custom_developed_parts": true,
+ "owner": "Company ABC",
+ "confidentiality": "internal",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.\n",
+ "tags": [
+ "linux",
+ "apache",
+ "aws:ec2"
+ ],
+ "data_assets_processed": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data",
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_stored": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_formats_accepted": [
+ "json",
+ "file"
+ ],
+ "communication_links": [
+ {
+ "id": "apache-webserver\u003eerp-system-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "erp-system",
+ "title": "ERP System Traffic",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "apache-webserver\u003eauth-credential-check-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "identity-provider",
+ "title": "Auth Credential Check Traffic",
+ "description": "Link to the identity provider server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "backend-admin-client": {
+ "id": "backend-admin-client",
+ "title": "Backend Admin Client",
+ "description": "Backend admin client",
+ "usage": "devops",
+ "size": "component",
+ "technology": "browser",
+ "out_of_scope": true,
+ "used_as_client_by_human": true,
+ "justification_out_of_scope": "Owned and managed by ops provider",
+ "owner": "Company XYZ",
+ "confidentiality": "internal",
+ "integrity": "operational",
+ "availability": "operational",
+ "justification_cia_rating": "The client used by Company XYZ to administer the system.\n",
+ "data_assets_processed": [
+ "erp-logs"
+ ],
+ "communication_links": [
+ {
+ "id": "backend-admin-client\u003euser-management-access",
+ "source_id": "backend-admin-client",
+ "target_id": "ldap-auth-server",
+ "title": "User Management Access",
+ "description": "Link to the LDAP auth server for managing users",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backend-admin-client\u003eerp-web-access",
+ "source_id": "backend-admin-client",
+ "target_id": "erp-system",
+ "title": "ERP Web Access",
+ "description": "Link to the ERP system (Web)",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "erp-customizing"
+ ],
+ "data_assets_received": [
+ "erp-logs"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backend-admin-client\u003edb-update-access",
+ "source_id": "backend-admin-client",
+ "target_id": "sql-database",
+ "title": "DB Update Access",
+ "description": "Link to the database (JDBC tunneled via SSH)",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "db-dumps"
+ ],
+ "data_assets_received": [
+ "db-dumps",
+ "erp-logs",
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "backoffice-client": {
+ "id": "backoffice-client",
+ "title": "Backoffice Client",
+ "description": "Backoffice client",
+ "size": "component",
+ "technology": "desktop",
+ "out_of_scope": true,
+ "used_as_client_by_human": true,
+ "justification_out_of_scope": "Owned and managed by Company XYZ company",
+ "owner": "Company XYZ",
+ "confidentiality": "confidential",
+ "integrity": "important",
+ "availability": "important",
+ "justification_cia_rating": "The client used by Company XYZ to administer and use the system.\n",
+ "data_assets_processed": [
+ "customer-contracts",
+ "internal-business-data",
+ "erp-logs"
+ ],
+ "communication_links": [
+ {
+ "id": "backoffice-client\u003eerp-internal-access",
+ "source_id": "backoffice-client",
+ "target_id": "erp-system",
+ "title": "ERP Internal Access",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "tags": [
+ "some-erp"
+ ],
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backoffice-client\u003emarketing-cms-editing",
+ "source_id": "backoffice-client",
+ "target_id": "marketing-cms",
+ "title": "Marketing CMS Editing",
+ "description": "Link to the CMS for editing content",
+ "protocol": "https",
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "contract-fileserver": {
+ "id": "contract-fileserver",
+ "title": "Contract Fileserver",
+ "description": "NFS Filesystem for storing the contract PDFs",
+ "type": "datastore",
+ "size": "component",
+ "technology": "file-server",
+ "machine": "virtual",
+ "owner": "Company ABC",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "important",
+ "justification_cia_rating": "Contract data might contain financial data as well as personally identifiable information (PII). The integrity and availability of contract data is required for clearing payment disputes. The filesystem is also required to be available for storing new contracts of freshly generated customers.\n",
+ "tags": [
+ "linux",
+ "aws:s3"
+ ],
+ "data_assets_stored": [
+ "customer-contracts",
+ "contract-summaries"
+ ],
+ "data_formats_accepted": [
+ "file"
+ ]
+ },
+ "customer-client": {
+ "id": "customer-client",
+ "title": "Customer Web Client",
+ "description": "Customer Web Client",
+ "size": "component",
+ "technology": "browser",
+ "internet": true,
+ "out_of_scope": true,
+ "used_as_client_by_human": true,
+ "justification_out_of_scope": "Owned and managed by enduser customer",
+ "owner": "Customer",
+ "confidentiality": "internal",
+ "integrity": "operational",
+ "availability": "operational",
+ "justification_cia_rating": "The client used by the customer to access the system.\n",
+ "data_assets_processed": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code",
+ "marketing-material"
+ ],
+ "communication_links": [
+ {
+ "id": "customer-client\u003ecustomer-traffic",
+ "source_id": "customer-client",
+ "target_id": "load-balancer",
+ "title": "Customer Traffic",
+ "description": "Link to the load balancer",
+ "protocol": "https",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code",
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "erp-system": {
+ "id": "erp-system",
+ "title": "Backoffice ERP System",
+ "description": "ERP system",
+ "type": "process",
+ "technology": "erp",
+ "machine": "virtual",
+ "redundant": true,
+ "owner": "Company ABC",
+ "confidentiality": "strictly-confidential",
+ "integrity": "mission-critical",
+ "availability": "mission-critical",
+ "justification_cia_rating": "The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other Company XYZ internal processes.\n",
+ "tags": [
+ "linux"
+ ],
+ "data_assets_processed": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data",
+ "erp-customizing"
+ ],
+ "data_assets_stored": [
+ "erp-logs"
+ ],
+ "data_formats_accepted": [
+ "xml",
+ "file",
+ "serialization"
+ ],
+ "communication_links": [
+ {
+ "id": "erp-system\u003edatabase-traffic",
+ "source_id": "erp-system",
+ "target_id": "sql-database",
+ "title": "Database Traffic",
+ "description": "Link to the DB system",
+ "protocol": "jdbc",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "erp-system\u003enfs-filesystem-access",
+ "source_id": "erp-system",
+ "target_id": "contract-fileserver",
+ "title": "NFS Filesystem Access",
+ "description": "Link to the file system",
+ "protocol": "nfs",
+ "data_assets_sent": [
+ "customer-contracts"
+ ],
+ "data_assets_received": [
+ "customer-contracts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "external-dev-client": {
+ "id": "external-dev-client",
+ "title": "External Development Client",
+ "description": "External developer client",
+ "usage": "devops",
+ "technology": "devops-client",
+ "internet": true,
+ "multi_tenant": true,
+ "out_of_scope": true,
+ "used_as_client_by_human": true,
+ "justification_out_of_scope": "Owned and managed by external developers",
+ "owner": "External Developers",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "operational",
+ "justification_cia_rating": "The clients used by external developers to create parts of the application code.\n",
+ "tags": [
+ "linux"
+ ],
+ "data_assets_processed": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_stored": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_formats_accepted": [
+ "file"
+ ],
+ "communication_links": [
+ {
+ "id": "external-dev-client\u003egit-repo-code-write-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Code Write Access",
+ "description": "Link to the Git repo",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "external-dev-client\u003egit-repo-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Web-UI Access",
+ "description": "Link to the Git repo",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "external-dev-client\u003ejenkins-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "jenkins-buildserver",
+ "title": "Jenkins Web-UI Access",
+ "description": "Link to the Jenkins build server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "build-job-config"
+ ],
+ "data_assets_received": [
+ "build-job-config"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "git-repo": {
+ "id": "git-repo",
+ "title": "Git Repository",
+ "description": "Git repository server",
+ "usage": "devops",
+ "type": "process",
+ "technology": "sourcecode-repository",
+ "machine": "virtual",
+ "multi_tenant": true,
+ "owner": "Company ABC",
+ "confidentiality": "confidential",
+ "integrity": "important",
+ "availability": "important",
+ "justification_cia_rating": "The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is therefore rated as confidential.\n",
+ "tags": [
+ "linux",
+ "git"
+ ],
+ "data_assets_processed": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_stored": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_formats_accepted": [
+ "file"
+ ]
+ },
+ "identity-provider": {
+ "id": "identity-provider",
+ "title": "Identity Provider",
+ "description": "Identity provider server",
+ "type": "process",
+ "size": "component",
+ "technology": "identity-provider",
+ "machine": "virtual",
+ "owner": "Company ABC",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "The auth data of the application\n",
+ "tags": [
+ "linux",
+ "jboss",
+ "keycloak"
+ ],
+ "data_assets_processed": [
+ "customer-accounts"
+ ],
+ "communication_links": [
+ {
+ "id": "identity-provider\u003eldap-credential-check-traffic",
+ "source_id": "identity-provider",
+ "target_id": "ldap-auth-server",
+ "title": "LDAP Credential Check Traffic",
+ "description": "Link to the LDAP server",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "jenkins-buildserver": {
+ "id": "jenkins-buildserver",
+ "title": "Jenkins Buildserver",
+ "description": "Jenkins buildserver",
+ "usage": "devops",
+ "type": "process",
+ "technology": "build-pipeline",
+ "machine": "virtual",
+ "multi_tenant": true,
+ "owner": "Company ABC",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "important",
+ "justification_cia_rating": "The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk of reputation damage and application update unavailability when the build pipeline is compromised.\n",
+ "tags": [
+ "linux",
+ "jenkins"
+ ],
+ "data_assets_processed": [
+ "build-job-config",
+ "client-application-code",
+ "server-application-code",
+ "marketing-material"
+ ],
+ "data_assets_stored": [
+ "build-job-config",
+ "client-application-code",
+ "server-application-code",
+ "marketing-material"
+ ],
+ "data_formats_accepted": [
+ "file",
+ "serialization"
+ ],
+ "communication_links": [
+ {
+ "id": "jenkins-buildserver\u003egit-repo-code-read-access",
+ "source_id": "jenkins-buildserver",
+ "target_id": "git-repo",
+ "title": "Git Repo Code Read Access",
+ "description": "Link to the Git repository server",
+ "protocol": "ssh",
+ "readonly": true,
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "jenkins-buildserver\u003eapplication-deployment",
+ "source_id": "jenkins-buildserver",
+ "target_id": "apache-webserver",
+ "title": "Application Deployment",
+ "description": "Link to the Apache webserver",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "jenkins-buildserver\u003ecms-updates",
+ "source_id": "jenkins-buildserver",
+ "target_id": "marketing-cms",
+ "title": "CMS Updates",
+ "description": "Link to the CMS",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "ldap-auth-server": {
+ "id": "ldap-auth-server",
+ "title": "LDAP Auth Server",
+ "description": "LDAP authentication server",
+ "type": "datastore",
+ "size": "component",
+ "technology": "identity-store-ldap",
+ "encryption": "transparent",
+ "owner": "Company ABC",
+ "confidentiality": "confidential",
+ "integrity": "critical",
+ "availability": "critical",
+ "justification_cia_rating": "The auth data of the application\n",
+ "tags": [
+ "linux"
+ ],
+ "data_assets_processed": [
+ "customer-accounts"
+ ],
+ "data_assets_stored": [
+ "customer-accounts"
+ ]
+ },
+ "load-balancer": {
+ "id": "load-balancer",
+ "title": "Load Balancer",
+ "description": "Load Balancer (HA-Proxy)",
+ "type": "process",
+ "size": "component",
+ "technology": "load-balancer",
+ "owner": "Company ABC",
+ "confidentiality": "internal",
+ "integrity": "mission-critical",
+ "availability": "mission-critical",
+ "justification_cia_rating": "The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ usages of the portal and ERP system.\n",
+ "data_assets_processed": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data",
+ "client-application-code",
+ "marketing-material"
+ ],
+ "communication_links": [
+ {
+ "id": "load-balancer\u003eweb-application-traffic",
+ "source_id": "load-balancer",
+ "target_id": "apache-webserver",
+ "title": "Web Application Traffic",
+ "description": "Link to the web server",
+ "protocol": "http",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "load-balancer\u003ecms-content-traffic",
+ "source_id": "load-balancer",
+ "target_id": "marketing-cms",
+ "title": "CMS Content Traffic",
+ "description": "Link to the CMS server",
+ "protocol": "http",
+ "readonly": true,
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "marketing-cms": {
+ "id": "marketing-cms",
+ "title": "Marketing CMS",
+ "description": "CMS for the marketing content",
+ "type": "process",
+ "size": "application",
+ "technology": "cms",
+ "machine": "container",
+ "custom_developed_parts": true,
+ "owner": "Company ABC",
+ "confidentiality": "internal",
+ "integrity": "important",
+ "availability": "important",
+ "justification_cia_rating": "The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.\n",
+ "tags": [
+ "linux"
+ ],
+ "data_assets_processed": [
+ "marketing-material",
+ "customer-accounts"
+ ],
+ "data_assets_stored": [
+ "marketing-material"
+ ],
+ "communication_links": [
+ {
+ "id": "marketing-cms\u003eauth-traffic",
+ "source_id": "marketing-cms",
+ "target_id": "ldap-auth-server",
+ "title": "Auth Traffic",
+ "description": "Link to the LDAP auth server",
+ "protocol": "ldap",
+ "readonly": true,
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "sql-database": {
+ "id": "sql-database",
+ "title": "Customer Contract Database",
+ "description": "The database behind the ERP system",
+ "type": "datastore",
+ "size": "component",
+ "technology": "database",
+ "machine": "virtual",
+ "encryption": "data-with-symmetric-shared-key",
+ "owner": "Company ABC",
+ "confidentiality": "strictly-confidential",
+ "integrity": "mission-critical",
+ "availability": "mission-critical",
+ "justification_cia_rating": "The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also for other Company XYZ internal processes.\n",
+ "tags": [
+ "linux",
+ "mysql"
+ ],
+ "data_assets_processed": [
+ "db-dumps"
+ ],
+ "data_assets_stored": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ]
+ }
+ },
+ "trust_boundaries": {
+ "application-network": {
+ "id": "application-network",
+ "title": "Application Network",
+ "description": "Application Network",
+ "type": "network-cloud-provider",
+ "tags": [
+ "aws"
+ ],
+ "technical_assets_inside": [
+ "load-balancer"
+ ],
+ "trust_boundaries_nested": [
+ "web-dmz",
+ "erp-dmz",
+ "auth-env"
+ ]
+ },
+ "auth-env": {
+ "id": "auth-env",
+ "title": "Auth Handling Environment",
+ "description": "Auth Handling Environment",
+ "type": "execution-environment",
+ "technical_assets_inside": [
+ "identity-provider",
+ "ldap-auth-server"
+ ]
+ },
+ "dev-network": {
+ "id": "dev-network",
+ "title": "Dev Network",
+ "description": "Development Network",
+ "technical_assets_inside": [
+ "jenkins-buildserver",
+ "git-repo",
+ "backend-admin-client",
+ "backoffice-client"
+ ]
+ },
+ "erp-dmz": {
+ "id": "erp-dmz",
+ "title": "ERP DMZ",
+ "description": "ERP DMZ",
+ "type": "network-cloud-security-group",
+ "tags": [
+ "some-erp"
+ ],
+ "technical_assets_inside": [
+ "erp-system",
+ "contract-fileserver",
+ "sql-database"
+ ]
+ },
+ "web-dmz": {
+ "id": "web-dmz",
+ "title": "Web DMZ",
+ "description": "Web DMZ",
+ "type": "network-cloud-security-group",
+ "technical_assets_inside": [
+ "apache-webserver",
+ "marketing-cms"
+ ]
+ }
+ },
+ "shared_runtimes": {
+ "webapp-virtualization": {
+ "id": "webapp-virtualization",
+ "title": "WebApp and Backoffice Virtualization",
+ "description": "WebApp Virtualization",
+ "tags": [
+ "vmware"
+ ],
+ "technical_assets_running": [
+ "apache-webserver",
+ "marketing-cms",
+ "erp-system",
+ "contract-fileserver",
+ "sql-database"
+ ]
+ }
+ },
+ "individual_risk_categories": {
+ "something-strange": {
+ "id": "something-strange",
+ "title": "Some Individual Risk Example",
+ "description": "Some text describing the risk category...",
+ "impact": "Some text describing the impact...",
+ "asvs": "V0 - Something Strange",
+ "cheat_sheet": "https://example.com",
+ "action": "Some text describing the action...",
+ "mitigation": "Some text describing the mitigation...",
+ "check": "Check if XYZ...",
+ "detection_logic": "Some text describing the detection logic...",
+ "risk_assessment": "Some text describing the risk assessment...",
+ "false_positives": "Some text describing the most common types of false positives...",
+ "stride": "repudiation",
+ "cwe": 693
+ }
+ },
+ "built_in_risk_categories": {
+ "accidental-secret-leak": {
+ "id": "accidental-secret-leak",
+ "title": "Accidental Secret Leak",
+ "description": "Sourcecode repositories (including their histories) as well as artifact registries can accidentally contain secrets like checked-in or packaged-in passwords, API tokens, certificates, crypto keys, etc.",
+ "impact": "If this risk is unmitigated, attackers which have access to affected sourcecode repositories or artifact registries might find secrets accidentally checked-in.",
+ "asvs": "V14 - Configuration Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Build Pipeline Hardening",
+ "mitigation": "Establish measures preventing accidental check-in or package-in of secrets into sourcecode repositories and artifact registries. This starts by using good .gitignore and .dockerignore files, but does not stop there. See for example tools like \u003ci\u003e\"git-secrets\" or \"Talisman\"\u003c/i\u003e to have check-in preventive measures for secrets. Consider also to regularly scan your repositories for secrets accidentally checked-in using scanning tools like \u003ci\u003e\"gitleaks\" or \"gitrob\"\u003c/i\u003e.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope sourcecode repositories and artifact registries.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Usually no false positives.",
+ "function": "operations",
+ "stride": "information-disclosure",
+ "cwe": 200
+ },
+ "code-backdooring": {
+ "id": "code-backdooring",
+ "title": "Code Backdooring",
+ "description": "For each build-pipeline component Code Backdooring risks might arise where attackers compromise the build-pipeline in order to let backdoored artifacts be shipped into production. Aside from direct code backdooring this includes backdooring of dependencies and even of more lower-level build infrastructure, like backdooring compilers (similar to what the XcodeGhost malware did) or dependencies.",
+ "impact": "If this risk remains unmitigated, attackers might be able to execute code on and completely takeover production environments.",
+ "asvs": "V10 - Malicious Code Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html",
+ "action": "Build Pipeline Hardening",
+ "mitigation": "Reduce the attack surface of backdooring the build pipeline by not directly exposing the build pipeline components on the public internet and also not exposing it in front of unmanaged (out-of-scope) developer clients. Also consider the use of code signing to prevent code modifications.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope development relevant technical assets which are either accessed by out-of-scope unmanaged developer clients and/or are directly accessed by any kind of internet-located (non-VPN) component or are themselves directly located on the internet.",
+ "risk_assessment": "The risk rating depends on the confidentiality and integrity rating of the code being handled and deployed as well as the placement/calling of this technical asset on/from the internet.",
+ "false_positives": "When the build-pipeline and sourcecode-repo is not exposed to the internet and considered fully trusted (which implies that all accessing clients are also considered fully trusted in terms of their patch management and applied hardening, which must be equivalent to a managed developer client environment) this can be considered a false positive after individual review.",
+ "function": "operations",
+ "stride": "tampering",
+ "cwe": 912
+ },
+ "container-baseimage-backdooring": {
+ "id": "container-baseimage-backdooring",
+ "title": "Container Base Image Backdooring",
+ "description": "When a technical asset is built using container technologies, Base Image Backdooring risks might arise where base images and other layers used contain vulnerable components or backdoors.\u003cbr\u003e\u003cbr\u003eSee for example: \u003ca href=\"https://techcrunch.com/2018/06/15/tainted-crypto-mining-containers-pulled-from-docker-hub/\"\u003ehttps://techcrunch.com/2018/06/15/tainted-crypto-mining-containers-pulled-from-docker-hub/\u003c/a\u003e",
+ "impact": "If this risk is unmitigated, attackers might be able to deeply persist in the target system by executing code in deployed containers.",
+ "asvs": "V10 - Malicious Code Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html",
+ "action": "Container Infrastructure Hardening",
+ "mitigation": "Apply hardening of all container infrastructures (see for example the \u003ci\u003eCIS-Benchmarks for Docker and Kubernetes\u003c/i\u003e and the \u003ci\u003eDocker Bench for Security\u003c/i\u003e). Use only trusted base images of the original vendors, verify digital signatures and apply image creation best practices. Also consider using Google's \u003ci\u003eDistroless\u003c/i\u003e base images or otherwise very small base images. Regularly execute container image scans with tools checking the layers for vulnerable components.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS/CSVS applied?",
+ "detection_logic": "In-scope technical assets running as containers.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets.",
+ "false_positives": "Fully trusted (i.e. reviewed and cryptographically signed or similar) base images of containers can be considered as false positives after individual review.",
+ "function": "operations",
+ "stride": "tampering",
+ "cwe": 912
+ },
+ "container-platform-escape": {
+ "id": "container-platform-escape",
+ "title": "Container Platform Escape",
+ "description": "Container platforms are especially interesting targets for attackers as they host big parts of a containerized runtime infrastructure. When not configured and operated with security best practices in mind, attackers might exploit a vulnerability inside an container and escape towards the platform as highly privileged users. These scenarios might give attackers capabilities to attack every other container as owning the container platform (via container escape attacks) equals to owning every container.",
+ "impact": "If this risk is unmitigated, attackers which have successfully compromised a container (via other vulnerabilities) might be able to deeply persist in the target system by executing code in many deployed containers and the container platform itself.",
+ "asvs": "V14 - Configuration Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html",
+ "action": "Container Infrastructure Hardening",
+ "mitigation": "Apply hardening of all container infrastructures. \u003cp\u003eSee for example the \u003ci\u003eCIS-Benchmarks for Docker and Kubernetes\u003c/i\u003e as well as the \u003ci\u003eDocker Bench for Security\u003c/i\u003e ( \u003ca href=\"https://github.com/docker/docker-bench-security\"\u003ehttps://github.com/docker/docker-bench-security\u003c/a\u003e ) or \u003ci\u003eInSpec Checks for Docker and Kubernetes\u003c/i\u003e ( \u003ca href=\"https://github.com/dev-sec/cis-docker-benchmark\"\u003ehttps://github.com/dev-sec/cis-docker-benchmark\u003c/a\u003e and \u003ca href=\"https://github.com/dev-sec/cis-kubernetes-benchmark\"\u003ehttps://github.com/dev-sec/cis-kubernetes-benchmark\u003c/a\u003e ). Use only trusted base images, verify digital signatures and apply image creation best practices. Also consider using Google's \u003ci\u003eDistroless\u003c/i\u003e base images or otherwise very small base images. Apply namespace isolation and node affinity to separate pods from each other in terms of access and nodes the same style as you separate data.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS or CSVS chapter applied?",
+ "detection_logic": "In-scope container platforms.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Container platforms not running parts of the target architecture can be considered as false positives after individual review.",
+ "function": "operations",
+ "stride": "elevation-of-privilege",
+ "cwe": 1008
+ },
+ "cross-site-request-forgery": {
+ "id": "cross-site-request-forgery",
+ "title": "Cross-Site Request Forgery (CSRF)",
+ "description": "When a web application is accessed via web protocols Cross-Site Request Forgery (CSRF) risks might arise.",
+ "impact": "If this risk remains unmitigated, attackers might be able to trick logged-in victim users into unwanted actions within the web application by visiting an attacker controlled web site.",
+ "asvs": "V4 - Access Control Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html",
+ "action": "CSRF Prevention",
+ "mitigation": "Try to use anti-CSRF tokens or the double-submit patterns (at least for logged-in requests). When your authentication scheme depends on cookies (like session or token cookies), consider marking them with the same-site flag. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope web applications accessed via typical web access protocols.",
+ "risk_assessment": "The risk rating depends on the integrity rating of the data sent across the communication link.",
+ "false_positives": "Web applications passing the authentication state via custom headers instead of cookies can eventually be false positives. Also when the web application is not accessed via a browser-like component (i.e. not by a human user initiating the request that gets passed through all components until it reaches the web application) this can be considered a false positive.",
+ "function": "development",
+ "cwe": 352
+ },
+ "cross-site-scripting": {
+ "id": "cross-site-scripting",
+ "title": "Cross-Site Scripting (XSS)",
+ "description": "For each web application Cross-Site Scripting (XSS) risks might arise. In terms of the overall risk level take other applications running on the same domain into account as well.",
+ "impact": "If this risk remains unmitigated, attackers might be able to access individual victim sessions and steal or modify user data.",
+ "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html",
+ "action": "XSS Prevention",
+ "mitigation": "Try to encode all values sent back to the browser and also handle DOM-manipulations in a safe way to avoid DOM-based XSS. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope web applications.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the data processed or stored in the web application.",
+ "false_positives": "When the technical asset is not accessed via a browser-like component (i.e. not by a human user initiating the request that gets passed through all components until it reaches the web application) this can be considered a false positive.",
+ "function": "development",
+ "stride": "tampering",
+ "cwe": 79
+ },
+ "dos-risky-access-across-trust-boundary": {
+ "id": "dos-risky-access-across-trust-boundary",
+ "title": "DoS-risky Access Across Trust-Boundary",
+ "description": "Assets accessed across trust boundaries with critical or mission-critical availability rating are more prone to Denial-of-Service (DoS) risks.",
+ "impact": "If this risk remains unmitigated, attackers might be able to disturb the availability of important parts of the system.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html",
+ "action": "Anti-DoS Measures",
+ "mitigation": "Apply anti-DoS techniques like throttling and/or per-client load blocking with quotas. Also for maintenance access routes consider applying a VPN instead of public reachable interfaces. Generally applying redundancy on the targeted technical asset reduces the risk of DoS.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets (excluding load-balancer) with availability rating of critical or higher which have incoming data-flows across a network trust-boundary (excluding devops usage).",
+ "risk_assessment": "Matching technical assets with availability rating of critical or higher are at low risk. When the availability rating is mission-critical and neither a VPN nor IP filter for the incoming data-flow nor redundancy for the asset is applied, the risk-rating is considered medium.",
+ "false_positives": "When the accessed target operations are not time- or resource-consuming.",
+ "function": "operations",
+ "stride": "denial-of-service",
+ "cwe": 400
+ },
+ "incomplete-model": {
+ "id": "incomplete-model",
+ "title": "Incomplete Model",
+ "description": "When the threat model contains unknown technologies or transfers data over unknown protocols, this is an indicator for an incomplete model.",
+ "impact": "If this risk is unmitigated, other risks might not be noticed as the model is incomplete.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html",
+ "action": "Threat Modeling Completeness",
+ "mitigation": "Try to find out what technology or protocol is used instead of specifying that it is unknown.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "All technical assets and communication links with technology type or protocol type specified as unknown.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "information-disclosure",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "ldap-injection": {
+ "id": "ldap-injection",
+ "title": "LDAP-Injection",
+ "description": "When an LDAP server is accessed LDAP-Injection risks might arise. The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.",
+ "impact": "If this risk remains unmitigated, attackers might be able to modify LDAP queries and access more data from the LDAP server than allowed.",
+ "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html",
+ "action": "LDAP-Injection Prevention",
+ "mitigation": "Try to use libraries that properly encode LDAP meta characters in searches and queries to access the LDAP server in order to stay safe from LDAP-Injection vulnerabilities. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope clients accessing LDAP servers via typical LDAP access protocols.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the LDAP server itself and of the data assets processed or stored.",
+ "false_positives": "LDAP server queries by search values not consisting of parts controllable by the caller can be considered as false positives after individual review.",
+ "function": "development",
+ "stride": "tampering",
+ "cwe": 90
+ },
+ "missing-authentication": {
+ "id": "missing-authentication",
+ "title": "Missing Authentication",
+ "description": "Technical assets (especially multi-tenant systems) should authenticate incoming requests when the asset processes or stores sensitive data. ",
+ "impact": "If this risk is unmitigated, attackers might be able to access or modify sensitive data in an unauthenticated way.",
+ "asvs": "V2 - Authentication Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
+ "action": "Authentication of Incoming Requests",
+ "mitigation": "Apply an authentication method to the technical asset. To protect highly sensitive data consider the use of two-factor authentication for human users.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets (except load-balancer, reverse-proxy, service-registry, waf, ids, and ips and in-process calls) should authenticate incoming requests when the asset processes or stores sensitive data. This is especially the case for all multi-tenant assets (there even non-sensitive ones).",
+ "risk_assessment": "The risk rating (medium or high) depends on the sensitivity of the data sent across the communication link. Monitoring callers are exempted from this risk.",
+ "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "cwe": 306
+ },
+ "missing-authentication-second-factor": {
+ "id": "missing-authentication-second-factor",
+ "title": "Missing Two-Factor Authentication (2FA)",
+ "description": "Technical assets (especially multi-tenant systems) should authenticate incoming requests with two-factor (2FA) authentication when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by humans.",
+ "impact": "If this risk is unmitigated, attackers might be able to access or modify highly sensitive data without strong authentication.",
+ "asvs": "V2 - Authentication Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html",
+ "action": "Authentication with Second Factor (2FA)",
+ "mitigation": "Apply an authentication method to the technical asset protecting highly sensitive data via two-factor authentication for human users.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets (except load-balancer, reverse-proxy, waf, ids, and ips) should authenticate incoming requests via two-factor authentication (2FA) when the asset processes or stores highly sensitive data (in terms of confidentiality, integrity, and availability) and is accessed by a client used by a human user.",
+ "risk_assessment": "medium",
+ "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.",
+ "stride": "elevation-of-privilege",
+ "cwe": 308
+ },
+ "missing-build-infrastructure": {
+ "id": "missing-build-infrastructure",
+ "title": "Missing Build Infrastructure",
+ "description": "The modeled architecture does not contain a build infrastructure (devops-client, sourcecode-repo, build-pipeline, etc.), which might be the risk of a model missing critical assets (and thus not seeing their risks). If the architecture contains custom-developed parts, the pipeline where code gets developed and built needs to be part of the model.",
+ "impact": "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model due to critical build infrastructure components missing in the model.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Build Pipeline Hardening",
+ "mitigation": "Include the build infrastructure in the model.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Models with in-scope custom-developed parts missing in-scope development (code creation) and build infrastructure components (devops-client, sourcecode-repo, build-pipeline, etc.).",
+ "risk_assessment": "The risk rating depends on the highest sensitivity of the in-scope assets running custom-developed parts.",
+ "false_positives": "Models not having any custom-developed parts can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "tampering",
+ "model_failure_possible_reason": true,
+ "cwe": 1127
+ },
+ "missing-cloud-hardening": {
+ "id": "missing-cloud-hardening",
+ "title": "Missing Cloud Hardening",
+ "description": "Cloud components should be hardened according to the cloud vendor best practices. This affects their configuration, auditing, and further areas.",
+ "impact": "If this risk is unmitigated, attackers might access cloud components in an unintended way.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Cloud Hardening",
+ "mitigation": "Apply hardening of all cloud components and services, taking special care to follow the individual risk descriptions (which depend on the cloud provider tags in the model). \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eAmazon Web Services (AWS)\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Amazon Web Services\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"PacBot\", \"CloudSploit\", \"CloudMapper\", \"ScoutSuite\", or \"Prowler AWS CIS Benchmark Tool\"\u003c/i\u003e). \u003cbr\u003eFor EC2 and other servers running Amazon Linux, follow the \u003ci\u003eCIS Benchmark for Amazon Linux\u003c/i\u003e and switch to IMDSv2. \u003cbr\u003eFor S3 buckets follow the \u003ci\u003eSecurity Best Practices for Amazon S3\u003c/i\u003e at \u003ca href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html\"\u003ehttps://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html\u003c/a\u003e to avoid accidental leakage. \u003cbr\u003eAlso take a look at some of these tools: \u003ca href=\"https://github.com/toniblyx/my-arsenal-of-aws-security-tools\"\u003ehttps://github.com/toniblyx/my-arsenal-of-aws-security-tools\u003c/a\u003e \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eMicrosoft Azure\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Microsoft Azure\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\" or \"ScoutSuite\"\u003c/i\u003e).\u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eGoogle Cloud Platform\u003c/b\u003e: Follow the \u003ci\u003eCIS Benchmark for Google Cloud Computing Platform\u003c/i\u003e (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\" or \"ScoutSuite\"\u003c/i\u003e). \u003cbr\u003e\u003cbr\u003eFor \u003cb\u003eOracle Cloud Platform\u003c/b\u003e: Follow the hardening best practices (see also the automated checks of cloud audit tools like \u003ci\u003e\"CloudSploit\"\u003c/i\u003e).",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope cloud components (either residing in cloud trust boundaries or more specifically tagged with cloud provider types).",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Cloud components not running parts of the target architecture can be considered as false positives after individual review.",
+ "function": "operations",
+ "stride": "tampering",
+ "cwe": 1008
+ },
+ "missing-file-validation": {
+ "id": "missing-file-validation",
+ "title": "Missing File Validation",
+ "description": "When a technical asset accepts files, these input files should be strictly validated about filename and type.",
+ "impact": "If this risk is unmitigated, attackers might be able to provide malicious files to the application.",
+ "asvs": "V12 - File and Resources Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/File_Upload_Cheat_Sheet.html",
+ "action": "File Validation",
+ "mitigation": "Filter by file extension and discard (if feasible) the name provided. Whitelist the accepted file types and determine the mime-type on the server-side (for example via \"Apache Tika\" or similar checks). If the file is retrievable by end users and/or backoffice employees, consider performing scans for popular malware (if the files can be retrieved much later than they were uploaded, also apply a fresh malware scan during retrieval to scan with newer signatures of popular malware). Also enforce limits on maximum file size to avoid denial-of-service like scenarios.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets with custom-developed code accepting file data formats.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Fully trusted (i.e. cryptographically signed or similar) files can be considered as false positives after individual review.",
+ "function": "development",
+ "cwe": 434
+ },
+ "missing-hardening": {
+ "id": "missing-hardening",
+ "title": "Missing Hardening",
+ "description": "Technical assets with a Relative Attacker Attractiveness (RAA) value of 55 % or higher should be explicitly hardened taking best practices and vendor hardening guides into account.",
+ "impact": "If this risk remains unmitigated, attackers might be able to easier attack high-value targets.",
+ "asvs": "V14 - Configuration Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "System Hardening",
+ "mitigation": "Try to apply all hardening best practices (like CIS benchmarks, OWASP recommendations, vendor recommendations, DevSec Hardening Framework, DBSAT for Oracle databases, and others).",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets with RAA values of 55 % or higher. Generally for high-value targets like data stores, application servers, identity providers and ERP systems this limit is reduced to 40 %",
+ "risk_assessment": "The risk rating depends on the sensitivity of the data processed or stored in the technical asset.",
+ "false_positives": "Usually no false positives.",
+ "function": "operations",
+ "stride": "tampering",
+ "cwe": 16
+ },
+ "missing-identity-propagation": {
+ "id": "missing-identity-propagation",
+ "title": "Missing Identity Propagation",
+ "description": "Technical assets (especially multi-tenant systems), which usually process data for end users should authorize every request based on the identity of the end user when the data flow is authenticated (i.e. non-public). For DevOps usages at least a technical-user authorization is required.",
+ "impact": "If this risk is unmitigated, attackers might be able to access or modify foreign data after a successful compromise of a component within the system due to missing resource-based authorization checks.",
+ "asvs": "V4 - Access Control Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
+ "action": "Identity Propagation and Resource-based Authorization",
+ "mitigation": "When processing requests for end users if possible authorize in the backend against the propagated identity of the end user. This can be achieved in passing JWTs or similar tokens and checking them in the backend services. For DevOps usages apply at least a technical-user authorization.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope service-like technical assets which usually process data based on end user requests, if authenticated (i.e. non-public), should authorize incoming requests based on the propagated end user identity when their rating is sensitive. This is especially the case for all multi-tenant assets (there even less-sensitive rated ones). DevOps usages are exempted from this risk.",
+ "risk_assessment": "The risk rating (medium or high) depends on the confidentiality, integrity, and availability rating of the technical asset.",
+ "false_positives": "Technical assets which do not process requests regarding functionality or data linked to end-users (customers) can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "cwe": 284
+ },
+ "missing-identity-provider-isolation": {
+ "id": "missing-identity-provider-isolation",
+ "title": "Missing Identity Provider Isolation",
+ "description": "Highly sensitive identity provider assets and their identity data stores should be isolated from other assets by their own network segmentation trust-boundary (execution-environment boundaries do not count as network isolation).",
+ "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards highly sensitive identity provider assets and their identity data stores, as they are not separated by network segmentation.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Network Segmentation",
+ "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive identity provider assets and their identity data stores.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope identity provider assets and their identity data stores when surrounded by other (not identity-related) assets (without a network trust-boundary in-between). This risk is especially prevalent when other non-identity related assets are within the same execution environment (i.e. same database or same application server).",
+ "risk_assessment": "Default is high impact. The impact is increased to very-high when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.",
+ "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were identity providers with data of highest sensitivity.",
+ "function": "operations",
+ "stride": "elevation-of-privilege",
+ "cwe": 1008
+ },
+ "missing-identity-store": {
+ "id": "missing-identity-store",
+ "title": "Missing Identity Store",
+ "description": "The modeled architecture does not contain an identity store, which might be the risk of a model missing critical assets (and thus not seeing their risks).",
+ "impact": "If this risk is unmitigated, attackers might be able to exploit risks unseen in this threat model in the identity provider/store that is currently missing in the model.",
+ "asvs": "V2 - Authentication Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html",
+ "action": "Identity Store",
+ "mitigation": "Include an identity store in the model if the application has a login.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Models with authenticated data-flows authorized via end user identity missing an in-scope identity store.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the end user-identity authorized technical assets and their data assets processed and stored.",
+ "false_positives": "Models only offering data/services without any real authentication need can be considered as false positives after individual review.",
+ "function": "architecture",
+ "model_failure_possible_reason": true,
+ "cwe": 287
+ },
+ "missing-network-segmentation": {
+ "id": "missing-network-segmentation",
+ "title": "Missing Network Segmentation",
+ "description": "Highly sensitive assets and/or data stores residing in the same network segment as other lower sensitive assets (like webservers or content management systems etc.) should be better protected by a network segmentation trust-boundary.",
+ "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards more valuable targets, as they are not separated by network segmentation.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Network Segmentation",
+ "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive assets and/or data stores.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets with high sensitivity and RAA values as well as data stores when surrounded by assets (without a network trust-boundary in-between) which are of type client-system, web-server, web-application, cms, web-service-rest, web-service-soap, build-pipeline, sourcecode-repository, monitoring, or similar and there is no direct connection between these (hence no requirement to be so close to each other).",
+ "risk_assessment": "Default is low risk. The risk is increased to medium when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.",
+ "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were containing/processing highly sensitive data.",
+ "function": "operations",
+ "stride": "elevation-of-privilege",
+ "cwe": 1008
+ },
+ "missing-vault": {
+ "id": "missing-vault",
+ "title": "Missing Vault (Secret Storage)",
+ "description": "In order to avoid the risk of secret leakage via config files (when attacked through vulnerabilities being able to read files like Path-Traversal and others), it is best practice to use a separate hardened process with proper authentication, authorization, and audit logging to access config secrets (like credentials, private keys, client certificates, etc.). This component is usually some kind of Vault.",
+ "impact": "If this risk is unmitigated, attackers might be able to easier steal config secrets (like credentials, private keys, client certificates, etc.) once a vulnerability to access files is present and exploited.",
+ "asvs": "V6 - Stored Cryptography Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html",
+ "action": "Vault (Secret Storage)",
+ "mitigation": "Consider using a Vault (Secret Storage) to securely store and access config secrets (like credentials, private keys, client certificates, etc.).",
+ "check": "Is a Vault (Secret Storage) in place?",
+ "detection_logic": "Models without a Vault (Secret Storage).",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Models where no technical assets have any kind of sensitive config data to protect can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "information-disclosure",
+ "model_failure_possible_reason": true,
+ "cwe": 522
+ },
+ "missing-vault-isolation": {
+ "id": "missing-vault-isolation",
+ "title": "Missing Vault Isolation",
+ "description": "Highly sensitive vault assets and their data stores should be isolated from other assets by their own network segmentation trust-boundary (execution-environment boundaries do not count as network isolation).",
+ "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards highly sensitive vault assets and their data stores, as they are not separated by network segmentation.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Network Segmentation",
+ "mitigation": "Apply a network segmentation trust-boundary around the highly sensitive vault assets and their data stores.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope vault assets when surrounded by other (not vault-related) assets (without a network trust-boundary in-between). This risk is especially prevalent when other non-vault related assets are within the same execution environment (i.e. same database or same application server).",
+ "risk_assessment": "Default is medium impact. The impact is increased to high when the asset missing the trust-boundary protection is rated as strictly-confidential or mission-critical.",
+ "false_positives": "When all assets within the network segmentation trust-boundary are hardened and protected to the same extent as if all were vaults with data of highest sensitivity.",
+ "function": "operations",
+ "stride": "elevation-of-privilege",
+ "cwe": 1008
+ },
+ "missing-waf": {
+ "id": "missing-waf",
+ "title": "Missing Web Application Firewall (WAF)",
+ "description": "To have a first line of filtering defense, security architectures with web-services or web-applications should include a WAF in front of them. Even though a WAF is not a replacement for security (all components must be secure even without a WAF) it adds another layer of defense to the overall system by delaying some attacks and having easier attack alerting through it.",
+ "impact": "If this risk is unmitigated, attackers might be able to apply standard attack pattern tests at great speed without any filtering.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Virtual_Patching_Cheat_Sheet.html",
+ "action": "Web Application Firewall (WAF)",
+ "mitigation": "Consider placing a Web Application Firewall (WAF) in front of the web-services and/or web-applications. For cloud environments many cloud providers offer pre-configured WAFs. Even reverse proxies can be enhanced by a WAF component via ModSecurity plugins.",
+ "check": "Is a Web Application Firewall (WAF) in place?",
+ "detection_logic": "In-scope web-services and/or web-applications accessed across a network trust boundary not having a Web Application Firewall (WAF) in front of them.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Targets only accessible via WAFs or reverse proxies containing a WAF component (like ModSecurity) can be considered as false positives after individual review.",
+ "function": "operations",
+ "stride": "tampering",
+ "cwe": 1008
+ },
+ "mixed-targets-on-shared-runtime": {
+ "id": "mixed-targets-on-shared-runtime",
+ "title": "Mixed Targets on Shared Runtime",
+ "description": "Different attacker targets (like frontend and backend/datastore components) should not be running on the same shared (underlying) runtime.",
+ "impact": "If this risk is unmitigated, attackers successfully attacking other components of the system might have an easy path towards more valuable targets, as they are running on the same shared runtime.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Runtime Separation",
+ "mitigation": "Use separate runtime environments for running different target components or apply similar separation styles to prevent load- or breach-related problems originating from one more attacker-facing asset from also impacting the other more critical rated backend/datastore assets.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Shared runtime running technical assets of different trust-boundaries is at risk. Also mixing backend/datastore with frontend components on the same shared runtime is considered a risk.",
+ "risk_assessment": "The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of the technical asset running on the shared runtime.",
+ "false_positives": "When all assets running on the shared runtime are hardened and protected to the same extent as if all were containing/processing highly sensitive data.",
+ "function": "operations",
+ "stride": "elevation-of-privilege",
+ "cwe": 1008
+ },
+ "path-traversal": {
+ "id": "path-traversal",
+ "title": "Path-Traversal",
+ "description": "When a filesystem is accessed Path-Traversal or Local-File-Inclusion (LFI) risks might arise. The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.",
+ "impact": "If this risk is unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, business data files, etc.) from the filesystem of affected components.",
+ "asvs": "V12 - File and Resources Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Input_Validation_Cheat_Sheet.html",
+ "action": "Path-Traversal Prevention",
+ "mitigation": "Before accessing the file cross-check that it resides in the expected folder and is of the expected type and filename/suffix. Try to use a mapping if possible instead of directly accessing by a filename which is (partly or fully) provided by the caller. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Filesystems accessed by in-scope callers.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the data stored inside the technical asset.",
+ "false_positives": "File accesses by filenames not consisting of parts controllable by the caller can be considered as false positives after individual review.",
+ "function": "development",
+ "stride": "information-disclosure",
+ "cwe": 22
+ },
+ "push-instead-of-pull-deployment": {
+ "id": "push-instead-of-pull-deployment",
+ "title": "Push instead of Pull Deployment",
+ "description": "When comparing push-based vs. pull-based deployments from a security perspective, pull-based deployments improve the overall security of the deployment targets. Every exposed interface of a production system to accept a deployment increases the attack surface of the production system, thus a pull-based approach exposes less attack surface relevant interfaces.",
+ "impact": "If this risk is unmitigated, attackers might have more potential target vectors for attacks, as the overall attack surface is unnecessarily increased.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Build Pipeline Hardening",
+ "mitigation": "Try to prefer pull-based deployments (like GitOps scenarios offer) over push-based deployments to reduce the attack surface of the production system.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Models with build pipeline components accessing in-scope targets of deployment (in a non-readonly way) which are not build-related components themselves.",
+ "risk_assessment": "The risk rating depends on the highest sensitivity of the deployment targets running custom-developed parts.",
+ "false_positives": "Communication links that are not deployment paths can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "tampering",
+ "model_failure_possible_reason": true,
+ "cwe": 1127
+ },
+ "search-query-injection": {
+ "id": "search-query-injection",
+ "title": "Search-Query Injection",
+ "description": "When a search engine server is accessed Search-Query Injection risks might arise.\u003cbr\u003e\u003cbr\u003eSee for example \u003ca href=\"https://github.com/veracode-research/solr-injection\"\u003ehttps://github.com/veracode-research/solr-injection\u003c/a\u003e and \u003ca href=\"https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\"\u003ehttps://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\u003c/a\u003e for more details (here related to Solr, but in general showcasing the topic of search query injections).",
+ "impact": "If this risk remains unmitigated, attackers might be able to read more data from the search index and eventually further escalate towards a deeper system penetration via code executions.",
+ "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_Cheat_Sheet.html",
+ "action": "Search-Query Injection Prevention",
+ "mitigation": "Try to use libraries that properly encode search query meta characters in searches and don't expose the query unfiltered to the caller. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope clients accessing search engine servers via typical search access protocols.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed or stored.",
+ "false_positives": "Server engine queries by search values not consisting of parts controllable by the caller can be considered as false positives after individual review.",
+ "function": "development",
+ "stride": "tampering",
+ "cwe": 74
+ },
+ "server-side-request-forgery": {
+ "id": "server-side-request-forgery",
+ "title": "Server-Side Request Forgery (SSRF)",
+ "description": "When a server system (i.e. not a client) is accessing other server systems via typical web protocols Server-Side Request Forgery (SSRF) or Local-File-Inclusion (LFI) or Remote-File-Inclusion (RFI) risks might arise. ",
+ "impact": "If this risk is unmitigated, attackers might be able to access sensitive services or files of network-reachable components by modifying outgoing calls of affected components.",
+ "asvs": "V12 - File and Resources Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html",
+ "action": "SSRF Prevention",
+ "mitigation": "Try to avoid constructing the outgoing target URL with caller controllable values. Alternatively use a mapping (whitelist) when accessing outgoing URLs instead of creating them including caller controllable values. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope non-client systems accessing (using outgoing communication links) targets with either HTTP or HTTPS protocol.",
+ "risk_assessment": "The risk rating (low or medium) depends on the sensitivity of the data assets receivable via web protocols from targets within the same network trust-boundary as well on the sensitivity of the data assets receivable via web protocols from the target asset itself. Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF.",
+ "false_positives": "Servers not sending outgoing web requests can be considered as false positives after review.",
+ "function": "development",
+ "stride": "information-disclosure",
+ "cwe": 918
+ },
+ "service-registry-poisoning": {
+ "id": "service-registry-poisoning",
+ "title": "Service Registry Poisoning",
+ "description": "When a service registry is used for discovery of trusted service endpoints Service Registry Poisoning risks might arise.",
+ "impact": "If this risk remains unmitigated, attackers might be able to poison the service registry with malicious service endpoints or malicious lookup and config data leading to breach of sensitive data.",
+ "asvs": "V10 - Malicious Code Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html",
+ "action": "Service Registry Integrity Check",
+ "mitigation": "Try to strengthen the access control of the service registry and apply cross-checks to detect maliciously poisoned lookup data.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope service registries.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical assets accessing the service registry as well as the data assets processed or stored.",
+ "false_positives": "Service registries not used for service discovery can be considered as false positives after individual review.",
+ "function": "architecture",
+ "cwe": 693
+ },
+ "sql-nosql-injection": {
+ "id": "sql-nosql-injection",
+ "title": "SQL/NoSQL-Injection",
+ "description": "When a database is accessed via database access protocols SQL/NoSQL-Injection risks might arise. The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed or stored.",
+ "impact": "If this risk is unmitigated, attackers might be able to modify SQL/NoSQL queries to steal and modify data and eventually further escalate towards a deeper system penetration via code executions.",
+ "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html",
+ "action": "SQL/NoSQL-Injection Prevention",
+ "mitigation": "Try to use parameter binding to be safe from injection vulnerabilities. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Database accessed via typical database access protocols by in-scope clients.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the data stored inside the database.",
+ "false_positives": "Database accesses by queries not consisting of parts controllable by the caller can be considered as false positives after individual review.",
+ "function": "development",
+ "stride": "tampering",
+ "cwe": 89
+ },
+ "unchecked-deployment": {
+ "id": "unchecked-deployment",
+ "title": "Unchecked Deployment",
+ "description": "For each build-pipeline component Unchecked Deployment risks might arise when the build-pipeline does not include established DevSecOps best-practices. DevSecOps best-practices scan as part of CI/CD pipelines for vulnerabilities in source- or byte-code, dependencies, container layers, and dynamically against running test systems. There are several open-source and commercial tools existing in the categories DAST, SAST, and IAST.",
+ "impact": "If this risk remains unmitigated, vulnerabilities in custom-developed software or their dependencies might not be identified during continuous deployment cycles.",
+ "asvs": "V14 - Configuration Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html",
+ "action": "Build Pipeline Hardening",
+ "mitigation": "Apply DevSecOps best-practices and use scanning tools to identify vulnerabilities in source- or byte-code, dependencies, container layers, and optionally also via dynamic scans against running test systems.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "All development-relevant technical assets.",
+ "risk_assessment": "The risk rating depends on the highest rating of the technical assets and data assets processed by deployment-receiving targets.",
+ "false_positives": "When the build-pipeline does not build any software components it can be considered a false positive after individual review.",
+ "function": "architecture",
+ "stride": "tampering",
+ "cwe": 1127
+ },
+ "unencrypted-asset": {
+ "id": "unencrypted-asset",
+ "title": "Unencrypted Technical Assets",
+ "description": "Due to the confidentiality rating of the technical asset itself and/or the processed data assets this technical asset must be encrypted. The risk rating depends on the sensitivity of the technical asset itself and of the data assets stored.",
+ "impact": "If this risk is unmitigated, attackers might be able to access unencrypted data when successfully compromising sensitive components.",
+ "asvs": "V6 - Stored Cryptography Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html",
+ "action": "Encryption of Technical Asset",
+ "mitigation": "Apply encryption to the technical asset.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope unencrypted technical assets (excluding reverse-proxy, load-balancer, waf, ids, ips and embedded components like library) storing data assets rated at least as confidential or critical. For technical assets storing data assets rated as strictly-confidential or mission-critical the encryption must be of type data-with-enduser-individual-key.",
+ "risk_assessment": "Depending on the confidentiality rating of the stored data-assets either medium or high risk.",
+ "false_positives": "When all sensitive data stored within the asset is already fully encrypted on document or data level.",
+ "function": "operations",
+ "stride": "information-disclosure",
+ "cwe": 311
+ },
+ "unencrypted-communication": {
+ "id": "unencrypted-communication",
+ "title": "Unencrypted Communication",
+ "description": "Due to the confidentiality and/or integrity rating of the data assets transferred over the communication link this connection must be encrypted.",
+ "impact": "If this risk is unmitigated, network attackers might be able to eavesdrop on unencrypted sensitive data sent between components.",
+ "asvs": "V9 - Communication Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html",
+ "action": "Encryption of Communication Links",
+ "mitigation": "Apply transport layer encryption to the communication link.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Unencrypted technical communication links of in-scope technical assets (excluding monitoring traffic as well as local-file-access and in-process-library-call) transferring sensitive data.",
+ "risk_assessment": "Depending on the confidentiality rating of the transferred data-assets either medium or high risk.",
+ "false_positives": "When all sensitive data sent over the communication link is already fully encrypted on document or data level. Also intra-container/pod communication can be considered false positive when container orchestration platform handles encryption.",
+ "function": "operations",
+ "stride": "information-disclosure",
+ "cwe": 319
+ },
+ "unguarded-access-from-internet": {
+ "id": "unguarded-access-from-internet",
+ "title": "Unguarded Access From Internet",
+ "description": "Internet-exposed assets must be guarded by a protecting service, application, or reverse-proxy.",
+ "impact": "If this risk is unmitigated, attackers might be able to directly attack sensitive systems without any hardening components in-between due to them being directly exposed on the internet.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Encapsulation of Technical Asset",
+ "mitigation": "Encapsulate the asset behind a guarding service, application, or reverse-proxy. For admin maintenance a bastion-host should be used as a jump-server. For file transfer a store-and-forward-host should be used as an indirect file exchange platform.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets (excluding load-balancer) with confidentiality rating of confidential (or higher) or with integrity rating of critical (or higher) when accessed directly from the internet. All web-server, web-application, reverse-proxy, waf, and gateway assets are exempted from this risk when they do not consist of custom developed code and the data-flow only consists of HTTP or FTP protocols. Access from monitoring systems as well as VPN-protected connections are exempted.",
+ "risk_assessment": "The matching technical assets are at low risk. When either the confidentiality rating is strictly-confidential or the integrity rating is mission-critical, the risk-rating is considered medium. For assets with RAA values higher than 40 % the risk-rating increases.",
+ "false_positives": "When other means of filtering client requests are applied equivalent of reverse-proxy, waf, or gateway components.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "cwe": 501
+ },
+ "unguarded-direct-datastore-access": {
+ "id": "unguarded-direct-datastore-access",
+ "title": "Unguarded Direct Datastore Access",
+ "description": "Data stores accessed across trust boundaries must be guarded by some protecting service or application.",
+ "impact": "If this risk is unmitigated, attackers might be able to directly attack sensitive data stores without any protecting components in-between.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Encapsulation of Datastore",
+ "mitigation": "Encapsulate the datastore access behind a guarding service or application.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets of type datastore (except identity-store-ldap when accessed from identity-provider and file-server when accessed via file transfer protocols) with confidentiality rating of confidential (or higher) or with integrity rating of critical (or higher) which have incoming data-flows from assets outside across a network trust-boundary. DevOps config and deployment access is excluded from this risk.",
+ "risk_assessment": "The matching technical assets are at low risk. When either the confidentiality rating is strictly-confidential or the integrity rating is mission-critical, the risk-rating is considered medium. For assets with RAA values higher than 40 % the risk-rating increases.",
+ "false_positives": "When the caller is considered fully trusted as if it was part of the datastore itself.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "cwe": 501
+ },
+ "unnecessary-communication-link": {
+ "id": "unnecessary-communication-link",
+ "title": "Unnecessary Communication Link",
+ "description": "When a technical communication link does not send or receive any data assets, this is an indicator for an unnecessary communication link (or for an incomplete model).",
+ "impact": "If this risk is unmitigated, attackers might be able to target unnecessary communication links.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Attack Surface Reduction",
+ "mitigation": "Try to avoid using technical communication links that do not send or receive anything.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets' technical communication links not sending or receiving any data assets.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "unnecessary-data-asset": {
+ "id": "unnecessary-data-asset",
+ "title": "Unnecessary Data Asset",
+ "description": "When a data asset is not processed or stored by any technical assets and also not transferred by any communication links, this is an indicator for an unnecessary data asset (or for an incomplete model).",
+ "impact": "If this risk is unmitigated, attackers might be able to access unnecessary data assets using other vulnerabilities.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Attack Surface Reduction",
+ "mitigation": "Try to avoid having data assets that are not required/used.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Modelled data assets not processed or stored by any technical assets and also not transferred by any communication links.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "unnecessary-data-transfer": {
+ "id": "unnecessary-data-transfer",
+ "title": "Unnecessary Data Transfer",
+ "description": "When a technical asset sends or receives data assets, which it neither processes or stores this is an indicator for unnecessarily transferred data (or for an incomplete model). When the unnecessarily transferred data assets are sensitive, this poses an unnecessary risk of an increased attack surface.",
+ "impact": "If this risk is unmitigated, attackers might be able to target unnecessarily transferred data.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Attack Surface Reduction",
+ "mitigation": "Try to avoid sending or receiving sensitive data assets which are not required (i.e. neither processed or stored) by the involved technical asset.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets sending or receiving sensitive data assets which are neither processed nor stored by the technical asset are flagged with this risk. The risk rating (low or medium) depends on the confidentiality, integrity, and availability rating of the technical asset. Monitoring data is exempted from this risk.",
+ "risk_assessment": "The risk assessment is depending on the confidentiality and integrity rating of the transferred data asset either low or medium.",
+ "false_positives": "Technical assets missing the model entries of either processing or storing the mentioned data assets can be considered as false positives (incomplete models) after individual review. These should then be addressed by completing the model so that all necessary data assets are processed and/or stored by the technical asset involved.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "unnecessary-technical-asset": {
+ "id": "unnecessary-technical-asset",
+ "title": "Unnecessary Technical Asset",
+ "description": "When a technical asset does not process or store any data assets, this is an indicator for an unnecessary technical asset (or for an incomplete model). This is also the case if the asset has no communication links (either outgoing or incoming).",
+ "impact": "If this risk is unmitigated, attackers might be able to target unnecessary technical assets.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
+ "action": "Attack Surface Reduction",
+ "mitigation": "Try to avoid using technical assets that do not process or store anything.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Technical assets not processing or storing any data assets.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "untrusted-deserialization": {
+ "id": "untrusted-deserialization",
+ "title": "Untrusted Deserialization",
+ "description": "When a technical asset accepts data in a specific serialized form (like Java or .NET serialization), Untrusted Deserialization risks might arise.\u003cbr\u003e\u003cbr\u003eSee \u003ca href=\"https://christian-schneider.net/JavaDeserializationSecurityFAQ.html\"\u003ehttps://christian-schneider.net/JavaDeserializationSecurityFAQ.html\u003c/a\u003e for more details.",
+ "impact": "If this risk is unmitigated, attackers might be able to execute code on target systems by exploiting untrusted deserialization endpoints.",
+ "asvs": "V5 - Validation, Sanitization and Encoding Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html",
+ "action": "Prevention of Deserialization of Untrusted Data",
+ "mitigation": "Try to avoid the deserialization of untrusted data (even of data within the same trust-boundary as long as it is sent across a remote connection) in order to stay safe from Untrusted Deserialization vulnerabilities. Alternatively a strict whitelisting approach of the classes/types/values to deserialize might help as well. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets accepting serialization data formats (including EJB and RMI protocols).",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
+ "false_positives": "Fully trusted (i.e. cryptographically signed or similar) data deserialized can be considered as false positives after individual review.",
+ "function": "architecture",
+ "stride": "tampering",
+ "cwe": 502
+ },
+ "wrong-communication-link-content": {
+ "id": "wrong-communication-link-content",
+ "title": "Wrong Communication Link Content",
+ "description": "When a communication link is defined as readonly, but does not receive any data asset, or when it is defined as not readonly, but does not send any data asset, it is likely to be a model failure.",
+ "impact": "If this potential model error is not fixed, some risks might not be visible.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html",
+ "action": "Model Consistency",
+ "mitigation": "Try to model the correct readonly flag and/or data sent/received of communication links. Also try to use communication link types matching the target technology/machine types.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Communication links with inconsistent data assets being sent/received not matching their readonly flag or otherwise inconsistent protocols not matching the target technology type.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "information-disclosure",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "wrong-trust-boundary-content": {
+ "id": "wrong-trust-boundary-content",
+ "title": "Wrong Trust Boundary Content",
+ "description": "When a trust boundary of type network-policy-namespace-isolation contains non-container assets it is likely to be a model failure.",
+ "impact": "If this potential model error is not fixed, some risks might not be visible.",
+ "asvs": "V1 - Architecture, Design and Threat Modeling Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html",
+ "action": "Model Consistency",
+ "mitigation": "Try to model the correct types of trust boundaries and data assets.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "Trust boundaries which should only contain containers, but have different assets inside.",
+ "risk_assessment": "low",
+ "false_positives": "Usually no false positives as this looks like an incomplete model.",
+ "function": "architecture",
+ "stride": "elevation-of-privilege",
+ "model_failure_possible_reason": true,
+ "cwe": 1008
+ },
+ "xml-external-entity": {
+ "id": "xml-external-entity",
+ "title": "XML External Entity (XXE)",
+ "description": "When a technical asset accepts data in XML format, XML External Entity (XXE) risks might arise.",
+ "impact": "If this risk is unmitigated, attackers might be able to read sensitive files (configuration data, key/credential files, deployment files, business data files, etc.) from the filesystem of affected components and/or access sensitive services or files of other components.",
+ "asvs": "V14 - Configuration Verification Requirements",
+ "cheat_sheet": "https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html",
+ "action": "XML Parser Hardening",
+ "mitigation": "Apply hardening of all XML parser instances in order to stay safe from XML External Entity (XXE) vulnerabilities. When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
+ "check": "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
+ "detection_logic": "In-scope technical assets accepting XML data formats.",
+ "risk_assessment": "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored. Also for cloud-based environments the exploitation impact is at least medium, as cloud backend services can be attacked via SSRF (and XXE vulnerabilities are often also SSRF vulnerabilities).",
+ "false_positives": "Fully trusted (i.e. cryptographically signed or similar) XML data can be considered as false positives after individual review.",
+ "function": "development",
+ "stride": "information-disclosure",
+ "cwe": 611
+ }
+ },
+ "risk_tracking": {
+ "dos-risky-access-across-trust-boundary@*@*@*": {
+ "synthetic_risk_id": "dos-risky-access-across-trust-boundary@*@*@*",
+ "justification": "The hardening measures are being implemented and checked",
+ "ticket": "XYZ-1234",
+ "checked_by": "John Doe",
+ "status": "in-progress",
+ "date": "2020-01-04T00:00:00Z"
+ },
+ "ldap-injection@*@ldap-auth-server@*": {
+ "synthetic_risk_id": "ldap-injection@*@ldap-auth-server@*",
+ "justification": "The hardening measures were implemented and checked",
+ "ticket": "XYZ-5678",
+ "checked_by": "John Doe",
+ "status": "mitigated",
+ "date": "2020-01-05T00:00:00Z"
+ },
+ "missing-authentication-second-factor@*@*@*": {
+ "synthetic_risk_id": "missing-authentication-second-factor@*@*@*",
+ "justification": "The hardening measures were implemented and checked",
+ "ticket": "XYZ-1234",
+ "checked_by": "John Doe",
+ "status": "mitigated",
+ "date": "2020-01-04T00:00:00Z"
+ },
+ "missing-hardening@*": {
+ "synthetic_risk_id": "missing-hardening@*",
+ "justification": "The hardening measures were implemented and checked",
+ "ticket": "XYZ-1234",
+ "checked_by": "John Doe",
+ "status": "mitigated",
+ "date": "2020-01-04T00:00:00Z"
+ },
+ "unencrypted-asset@*": {
+ "synthetic_risk_id": "unencrypted-asset@*",
+ "justification": "The hardening measures were implemented and checked",
+ "ticket": "XYZ-1234",
+ "checked_by": "John Doe",
+ "status": "mitigated",
+ "date": "2020-01-04T00:00:00Z"
+ },
+ "untrusted-deserialization@erp-system": {
+ "synthetic_risk_id": "untrusted-deserialization@erp-system",
+ "justification": "Risk accepted as tolerable",
+ "ticket": "XYZ-1234",
+ "checked_by": "John Doe",
+ "status": "accepted",
+ "date": "2020-01-04T00:00:00Z"
+ }
+ },
+ "communication_links": {
+ "apache-webserver\u003eauth-credential-check-traffic": {
+ "id": "apache-webserver\u003eauth-credential-check-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "identity-provider",
+ "title": "Auth Credential Check Traffic",
+ "description": "Link to the identity provider server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "apache-webserver\u003eerp-system-traffic": {
+ "id": "apache-webserver\u003eerp-system-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "erp-system",
+ "title": "ERP System Traffic",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "backend-admin-client\u003edb-update-access": {
+ "id": "backend-admin-client\u003edb-update-access",
+ "source_id": "backend-admin-client",
+ "target_id": "sql-database",
+ "title": "DB Update Access",
+ "description": "Link to the database (JDBC tunneled via SSH)",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "db-dumps"
+ ],
+ "data_assets_received": [
+ "db-dumps",
+ "erp-logs",
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "backend-admin-client\u003eerp-web-access": {
+ "id": "backend-admin-client\u003eerp-web-access",
+ "source_id": "backend-admin-client",
+ "target_id": "erp-system",
+ "title": "ERP Web Access",
+ "description": "Link to the ERP system (Web)",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "erp-customizing"
+ ],
+ "data_assets_received": [
+ "erp-logs"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "backend-admin-client\u003euser-management-access": {
+ "id": "backend-admin-client\u003euser-management-access",
+ "source_id": "backend-admin-client",
+ "target_id": "ldap-auth-server",
+ "title": "User Management Access",
+ "description": "Link to the LDAP auth server for managing users",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "backoffice-client\u003eerp-internal-access": {
+ "id": "backoffice-client\u003eerp-internal-access",
+ "source_id": "backoffice-client",
+ "target_id": "erp-system",
+ "title": "ERP Internal Access",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "tags": [
+ "some-erp"
+ ],
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "backoffice-client\u003emarketing-cms-editing": {
+ "id": "backoffice-client\u003emarketing-cms-editing",
+ "source_id": "backoffice-client",
+ "target_id": "marketing-cms",
+ "title": "Marketing CMS Editing",
+ "description": "Link to the CMS for editing content",
+ "protocol": "https",
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "customer-client\u003ecustomer-traffic": {
+ "id": "customer-client\u003ecustomer-traffic",
+ "source_id": "customer-client",
+ "target_id": "load-balancer",
+ "title": "Customer Traffic",
+ "description": "Link to the load balancer",
+ "protocol": "https",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code",
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "erp-system\u003edatabase-traffic": {
+ "id": "erp-system\u003edatabase-traffic",
+ "source_id": "erp-system",
+ "target_id": "sql-database",
+ "title": "Database Traffic",
+ "description": "Link to the DB system",
+ "protocol": "jdbc",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "erp-system\u003enfs-filesystem-access": {
+ "id": "erp-system\u003enfs-filesystem-access",
+ "source_id": "erp-system",
+ "target_id": "contract-fileserver",
+ "title": "NFS Filesystem Access",
+ "description": "Link to the file system",
+ "protocol": "nfs",
+ "data_assets_sent": [
+ "customer-contracts"
+ ],
+ "data_assets_received": [
+ "customer-contracts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "external-dev-client\u003egit-repo-code-write-access": {
+ "id": "external-dev-client\u003egit-repo-code-write-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Code Write Access",
+ "description": "Link to the Git repo",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "external-dev-client\u003egit-repo-web-ui-access": {
+ "id": "external-dev-client\u003egit-repo-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Web-UI Access",
+ "description": "Link to the Git repo",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "external-dev-client\u003ejenkins-web-ui-access": {
+ "id": "external-dev-client\u003ejenkins-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "jenkins-buildserver",
+ "title": "Jenkins Web-UI Access",
+ "description": "Link to the Jenkins build server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "build-job-config"
+ ],
+ "data_assets_received": [
+ "build-job-config"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "identity-provider\u003eldap-credential-check-traffic": {
+ "id": "identity-provider\u003eldap-credential-check-traffic",
+ "source_id": "identity-provider",
+ "target_id": "ldap-auth-server",
+ "title": "LDAP Credential Check Traffic",
+ "description": "Link to the LDAP server",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "jenkins-buildserver\u003eapplication-deployment": {
+ "id": "jenkins-buildserver\u003eapplication-deployment",
+ "source_id": "jenkins-buildserver",
+ "target_id": "apache-webserver",
+ "title": "Application Deployment",
+ "description": "Link to the Apache webserver",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "jenkins-buildserver\u003ecms-updates": {
+ "id": "jenkins-buildserver\u003ecms-updates",
+ "source_id": "jenkins-buildserver",
+ "target_id": "marketing-cms",
+ "title": "CMS Updates",
+ "description": "Link to the CMS",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "jenkins-buildserver\u003egit-repo-code-read-access": {
+ "id": "jenkins-buildserver\u003egit-repo-code-read-access",
+ "source_id": "jenkins-buildserver",
+ "target_id": "git-repo",
+ "title": "Git Repo Code Read Access",
+ "description": "Link to the Git repository server",
+ "protocol": "ssh",
+ "readonly": true,
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "load-balancer\u003ecms-content-traffic": {
+ "id": "load-balancer\u003ecms-content-traffic",
+ "source_id": "load-balancer",
+ "target_id": "marketing-cms",
+ "title": "CMS Content Traffic",
+ "description": "Link to the CMS server",
+ "protocol": "http",
+ "readonly": true,
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "load-balancer\u003eweb-application-traffic": {
+ "id": "load-balancer\u003eweb-application-traffic",
+ "source_id": "load-balancer",
+ "target_id": "apache-webserver",
+ "title": "Web Application Traffic",
+ "description": "Link to the web server",
+ "protocol": "http",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ "marketing-cms\u003eauth-traffic": {
+ "id": "marketing-cms\u003eauth-traffic",
+ "source_id": "marketing-cms",
+ "target_id": "ldap-auth-server",
+ "title": "Auth Traffic",
+ "description": "Link to the LDAP auth server",
+ "protocol": "ldap",
+ "readonly": true,
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ },
+ "diagram_tweak_nodesep": 2,
+ "diagram_tweak_ranksep": 2,
+ "incoming_technical_communication_links_mapped_by_target_id": {
+ "apache-webserver": [
+ {
+ "id": "load-balancer\u003eweb-application-traffic",
+ "source_id": "load-balancer",
+ "target_id": "apache-webserver",
+ "title": "Web Application Traffic",
+ "description": "Link to the web server",
+ "protocol": "http",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "jenkins-buildserver\u003eapplication-deployment",
+ "source_id": "jenkins-buildserver",
+ "target_id": "apache-webserver",
+ "title": "Application Deployment",
+ "description": "Link to the Apache webserver",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "contract-fileserver": [
+ {
+ "id": "erp-system\u003enfs-filesystem-access",
+ "source_id": "erp-system",
+ "target_id": "contract-fileserver",
+ "title": "NFS Filesystem Access",
+ "description": "Link to the file system",
+ "protocol": "nfs",
+ "data_assets_sent": [
+ "customer-contracts"
+ ],
+ "data_assets_received": [
+ "customer-contracts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "erp-system": [
+ {
+ "id": "backend-admin-client\u003eerp-web-access",
+ "source_id": "backend-admin-client",
+ "target_id": "erp-system",
+ "title": "ERP Web Access",
+ "description": "Link to the ERP system (Web)",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "erp-customizing"
+ ],
+ "data_assets_received": [
+ "erp-logs"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backoffice-client\u003eerp-internal-access",
+ "source_id": "backoffice-client",
+ "target_id": "erp-system",
+ "title": "ERP Internal Access",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "tags": [
+ "some-erp"
+ ],
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "apache-webserver\u003eerp-system-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "erp-system",
+ "title": "ERP System Traffic",
+ "description": "Link to the ERP system",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "git-repo": [
+ {
+ "id": "external-dev-client\u003egit-repo-code-write-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Code Write Access",
+ "description": "Link to the Git repo",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "external-dev-client\u003egit-repo-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "git-repo",
+ "title": "Git-Repo Web-UI Access",
+ "description": "Link to the Git repo",
+ "protocol": "https",
+ "authentication": "token",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "jenkins-buildserver\u003egit-repo-code-read-access",
+ "source_id": "jenkins-buildserver",
+ "target_id": "git-repo",
+ "title": "Git Repo Code Read Access",
+ "description": "Link to the Git repository server",
+ "protocol": "ssh",
+ "readonly": true,
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_received": [
+ "client-application-code",
+ "server-application-code"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "identity-provider": [
+ {
+ "id": "apache-webserver\u003eauth-credential-check-traffic",
+ "source_id": "apache-webserver",
+ "target_id": "identity-provider",
+ "title": "Auth Credential Check Traffic",
+ "description": "Link to the identity provider server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "jenkins-buildserver": [
+ {
+ "id": "external-dev-client\u003ejenkins-web-ui-access",
+ "source_id": "external-dev-client",
+ "target_id": "jenkins-buildserver",
+ "title": "Jenkins Web-UI Access",
+ "description": "Link to the Jenkins build server",
+ "protocol": "https",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "build-job-config"
+ ],
+ "data_assets_received": [
+ "build-job-config"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "ldap-auth-server": [
+ {
+ "id": "identity-provider\u003eldap-credential-check-traffic",
+ "source_id": "identity-provider",
+ "target_id": "ldap-auth-server",
+ "title": "LDAP Credential Check Traffic",
+ "description": "Link to the LDAP server",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "marketing-cms\u003eauth-traffic",
+ "source_id": "marketing-cms",
+ "target_id": "ldap-auth-server",
+ "title": "Auth Traffic",
+ "description": "Link to the LDAP auth server",
+ "protocol": "ldap",
+ "readonly": true,
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backend-admin-client\u003euser-management-access",
+ "source_id": "backend-admin-client",
+ "target_id": "ldap-auth-server",
+ "title": "User Management Access",
+ "description": "Link to the LDAP auth server for managing users",
+ "protocol": "ldaps",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "customer-accounts"
+ ],
+ "data_assets_received": [
+ "customer-accounts"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "load-balancer": [
+ {
+ "id": "customer-client\u003ecustomer-traffic",
+ "source_id": "customer-client",
+ "target_id": "load-balancer",
+ "title": "Customer Traffic",
+ "description": "Link to the load balancer",
+ "protocol": "https",
+ "authentication": "session-id",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "customer-contracts",
+ "client-application-code",
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "marketing-cms": [
+ {
+ "id": "load-balancer\u003ecms-content-traffic",
+ "source_id": "load-balancer",
+ "target_id": "marketing-cms",
+ "title": "CMS Content Traffic",
+ "description": "Link to the CMS server",
+ "protocol": "http",
+ "readonly": true,
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "backoffice-client\u003emarketing-cms-editing",
+ "source_id": "backoffice-client",
+ "target_id": "marketing-cms",
+ "title": "Marketing CMS Editing",
+ "description": "Link to the CMS for editing content",
+ "protocol": "https",
+ "vpn": true,
+ "authentication": "token",
+ "authorization": "enduser-identity-propagation",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "data_assets_received": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "jenkins-buildserver\u003ecms-updates",
+ "source_id": "jenkins-buildserver",
+ "target_id": "marketing-cms",
+ "title": "CMS Updates",
+ "description": "Link to the CMS",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "marketing-material"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ],
+ "sql-database": [
+ {
+ "id": "backend-admin-client\u003edb-update-access",
+ "source_id": "backend-admin-client",
+ "target_id": "sql-database",
+ "title": "DB Update Access",
+ "description": "Link to the database (JDBC tunneled via SSH)",
+ "protocol": "ssh",
+ "authentication": "client-certificate",
+ "authorization": "technical-user",
+ "usage": "devops",
+ "data_assets_sent": [
+ "db-dumps"
+ ],
+ "data_assets_received": [
+ "db-dumps",
+ "erp-logs",
+ "customer-accounts",
+ "customer-operational-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ },
+ {
+ "id": "erp-system\u003edatabase-traffic",
+ "source_id": "erp-system",
+ "target_id": "sql-database",
+ "title": "Database Traffic",
+ "description": "Link to the DB system",
+ "protocol": "jdbc",
+ "authentication": "credentials",
+ "authorization": "technical-user",
+ "data_assets_sent": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "data_assets_received": [
+ "customer-accounts",
+ "customer-operational-data",
+ "internal-business-data"
+ ],
+ "diagram_tweak_weight": 1,
+ "diagram_tweak_constraint": true
+ }
+ ]
+ },
+ "direct_containing_trust_boundary_mapped_by_technical_asset_id": {
+ "apache-webserver": {
+ "id": "web-dmz",
+ "title": "Web DMZ",
+ "description": "Web DMZ",
+ "type": "network-cloud-security-group",
+ "technical_assets_inside": [
+ "apache-webserver",
+ "marketing-cms"
+ ]
+ },
+ "backend-admin-client": {
+ "id": "dev-network",
+ "title": "Dev Network",
+ "description": "Development Network",
+ "technical_assets_inside": [
+ "jenkins-buildserver",
+ "git-repo",
+ "backend-admin-client",
+ "backoffice-client"
+ ]
+ },
+ "backoffice-client": {
+ "id": "dev-network",
+ "title": "Dev Network",
+ "description": "Development Network",
+ "technical_assets_inside": [
+ "jenkins-buildserver",
+ "git-repo",
+ "backend-admin-client",
+ "backoffice-client"
+ ]
+ },
+ "contract-fileserver": {
+ "id": "erp-dmz",
+ "title": "ERP DMZ",
+ "description": "ERP DMZ",
+ "type": "network-cloud-security-group",
+ "tags": [
+ "some-erp"
+ ],
+ "technical_assets_inside": [
+ "erp-system",
+ "contract-fileserver",
+ "sql-database"
+ ]
+ },
+ "erp-system": {
+ "id": "erp-dmz",
+ "title": "ERP DMZ",
+ "description": "ERP DMZ",
+ "type": "network-cloud-security-group",
+ "tags": [
+ "some-erp"
+ ],
+ "technical_assets_inside": [
+ "erp-system",
+ "contract-fileserver",
+ "sql-database"
+ ]
+ },
+ "git-repo": {
+ "id": "dev-network",
+ "title": "Dev Network",
+ "description": "Development Network",
+ "technical_assets_inside": [
+ "jenkins-buildserver",
+ "git-repo",
+ "backend-admin-client",
+ "backoffice-client"
+ ]
+ },
+ "identity-provider": {
+ "id": "auth-env",
+ "title": "Auth Handling Environment",
+ "description": "Auth Handling Environment",
+ "type": "execution-environment",
+ "technical_assets_inside": [
+ "identity-provider",
+ "ldap-auth-server"
+ ]
+ },
+ "jenkins-buildserver": {
+ "id": "dev-network",
+ "title": "Dev Network",
+ "description": "Development Network",
+ "technical_assets_inside": [
+ "jenkins-buildserver",
+ "git-repo",
+ "backend-admin-client",
+ "backoffice-client"
+ ]
+ },
+ "ldap-auth-server": {
+ "id": "auth-env",
+ "title": "Auth Handling Environment",
+ "description": "Auth Handling Environment",
+ "type": "execution-environment",
+ "technical_assets_inside": [
+ "identity-provider",
+ "ldap-auth-server"
+ ]
+ },
+ "load-balancer": {
+ "id": "application-network",
+ "title": "Application Network",
+ "description": "Application Network",
+ "type": "network-cloud-provider",
+ "tags": [
+ "aws"
+ ],
+ "technical_assets_inside": [
+ "load-balancer"
+ ],
+ "trust_boundaries_nested": [
+ "web-dmz",
+ "erp-dmz",
+ "auth-env"
+ ]
+ },
+ "marketing-cms": {
+ "id": "web-dmz",
+ "title": "Web DMZ",
+ "description": "Web DMZ",
+ "type": "network-cloud-security-group",
+ "technical_assets_inside": [
+ "apache-webserver",
+ "marketing-cms"
+ ]
+ },
+ "sql-database": {
+ "id": "erp-dmz",
+ "title": "ERP DMZ",
+ "description": "ERP DMZ",
+ "type": "network-cloud-security-group",
+ "tags": [
+ "some-erp"
+ ],
+ "technical_assets_inside": [
+ "erp-system",
+ "contract-fileserver",
+ "sql-database"
+ ]
+ }
+ },
+ "generated_risks_by_category": {
+ "something-strange": [
+ {
+ "category": "something-strange",
+ "severity": "critical",
+ "exploitation_likelihood": "likely",
+ "exploitation_impact": "medium",
+ "title": "\u003cb\u003eExample Individual Risk\u003c/b\u003e at \u003cb\u003eDatabase\u003c/b\u003e",
+ "synthetic_id": "something-strange@sql-database",
+ "most_relevant_technical_asset": "sql-database",
+ "data_breach_probability": "probable",
+ "data_breach_technical_assets": [
+ "sql-database"
+ ]
+ },
+ {
+ "category": "something-strange",
+ "severity": "medium",
+ "exploitation_likelihood": "frequent",
+ "exploitation_impact": "very-high",
+ "title": "\u003cb\u003eExample Individual Risk\u003c/b\u003e at \u003cb\u003eContract Filesystem\u003c/b\u003e",
+ "synthetic_id": "something-strange@contract-fileserver",
+ "most_relevant_technical_asset": "contract-fileserver"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/test/all.yaml b/test/all.yaml
new file mode 100644
index 00000000..77815f77
--- /dev/null
+++ b/test/all.yaml
@@ -0,0 +1,1354 @@
+threagile_version: 1.0.0
+
+# NOTE:
+#
+# For a perfect editing experience within your IDE of choice you can easily
+# get model syntax validation and autocompletion (very handy for enum values)
+# as well as live templates: Just import the schema.json into your IDE and assign
+# it as "schema" to each Threagile YAML file. Also try to import individual parts
+# from the live-templates.txt file into your IDE as live editing templates.
+#
+# You might also want to try the REST API when running in server mode...
+
+
+
+title: Some Example Application
+
+date: 2020-07-01
+
+author:
+ name: John Doe
+ homepage: www.example.com
+
+
+
+
+management_summary_comment: >
+ Just some more custom summary possible here...
+
+business_criticality: important # values: archive, operational, important, critical, mission-critical
+
+
+
+
+business_overview:
+ description: Some more demo text here and even images...
+ images:
+# - custom-image-1.png: Some dummy image 1
+# - custom-image-2.png: Some dummy image 2
+
+
+technical_overview:
+ description: Some more demo text here and even images...
+ images:
+# - custom-image-1.png: Some dummy image 1
+# - custom-image-2.png: Some dummy image 2
+
+
+
+questions: # simply use "" as answer to signal "unanswered"
+ How are the admin clients managed/protected against compromise?: ""
+ How are the development clients managed/protected against compromise?: >
+ Managed by XYZ
+ How are the build pipeline components managed/protected against compromise?: >
+ Managed by XYZ
+
+
+
+abuse_cases:
+ Denial-of-Service: >
+ As a hacker I want to disturb the functionality of the backend system in order to cause indirect
+ financial damage via unusable features.
+ CPU-Cycle Theft: >
+ As a hacker I want to steal CPU cycles in order to transform them into money via installed crypto currency miners.
+ Ransomware: >
+ As a hacker I want to encrypt the storage and file systems in order to demand ransom.
+ Identity Theft: >
+ As a hacker I want to steal identity data in order to reuse credentials and/or keys on other targets of the same company or outside.
+ PII Theft: >
+ As a hacker I want to steal PII (Personally Identifiable Information) data in order to blackmail the company and/or damage
+ their repudiation by publishing them.
+
+ ERP-System Compromise: >
+ As a hacker I want to access the ERP-System in order to steal/modify sensitive business data.
+ Database Compromise: >
+ As a hacker I want to access the database backend of the ERP-System in order to steal/modify sensitive
+ business data.
+ Contract Filesystem Compromise: >
+ As a hacker I want to access the filesystem storing the contract PDFs in order to steal/modify contract data.
+ Cross-Site Scripting Attacks: >
+ As a hacker I want to execute Cross-Site Scripting (XSS) and similar attacks in order to takeover victim sessions and
+ cause reputational damage.
+ Denial-of-Service of Enduser Functionality: >
+ As a hacker I want to disturb the functionality of the enduser parts of the application in order to cause direct financial
+ damage (lower sales).
+ Denial-of-Service of ERP/DB Functionality: >
+ As a hacker I want to disturb the functionality of the ERP system and/or it's database in order to cause indirect
+ financial damage via unusable internal ERP features (not related to customer portal).
+
+
+security_requirements:
+ Input Validation: Strict input validation is required to reduce the overall attack surface.
+ Securing Administrative Access: Administrative access must be secured with strong encryption and multi-factor authentication.
+ EU-DSGVO: Mandatory EU-Datenschutzgrundverordnung
+
+
+# Tags can be used for anything, it's just a tag. Also risk rules can act based on tags if you like.
+# Tags can be used for example to name the products used (which is more concrete than the technology types that only specify the type)
+tags_available:
+ - linux
+ - apache
+ - mysql
+ - jboss
+ - keycloak
+ - jenkins
+ - git
+ - oracle
+ - some-erp
+ - vmware
+ - aws
+ - aws:ec2
+ - aws:s3
+
+
+
+
+data_assets:
+
+
+ Customer Contracts: &customer-contracts # this example shows the inheritance-like features of YAML
+ id: customer-contracts
+ description: Customer Contracts (PDF)
+ usage: business # values: business, devops
+ tags:
+ origin: Customer
+ owner: Company XYZ
+ quantity: many # values: very-few, few, many, very-many
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Contract data might contain financial data as well as personally identifiable information (PII). The integrity and
+ availability of contract data is required for clearing payment disputes.
+
+
+ Customer Contract Summaries:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: contract-summaries
+ description: Customer Contract Summaries
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Just some summaries.
+
+
+ Customer Operational Data:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: customer-operational-data
+ description: Customer Operational Data
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Customer operational data for using the portal are required to be available to offer the portal functionality
+ and are used in the backend transactions.
+
+
+ Customer Accounts:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: customer-accounts
+ description: Customer Accounts (including transient credentials when entered for checking them)
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Customer account data for using the portal are required to be available to offer the portal functionality.
+
+
+ Some Internal Business Data:
+ id: internal-business-data
+ description: Internal business data of the ERP system used unrelated to the customer-facing processes.
+ usage: business # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: few # values: very-few, few, many, very-many
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for
+ internal non-customer-portal-related stuff).
+
+
+ Client Application Code: &client-application-code # this example shows the inheritance-like features of YAML
+ id: client-application-code
+ description: Angular and other client-side code delivered by the application.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company ABC
+ owner: Company ABC
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: public # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The integrity of the public data is critical to avoid reputational damage and the availability is important on the
+ long-term scale (but not critical) to keep the growth rate of the customer base steady.
+
+
+ Server Application Code:
+ <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values
+ id: server-application-code
+ description: API and other server-side code of the application.
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The integrity of the API code is critical to avoid reputational damage and the availability is important on the
+ long-term scale (but not critical) to keep the growth rate of the customer base steady.
+
+
+ Build Job Config:
+ id: build-job-config
+ description: Data for customizing of the build job system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the build job system.
+
+
+ Marketing Material:
+ <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values
+ id: marketing-material
+ description: Website and marketing data to inform potential customers and generate new leads.
+ integrity: important # values: archive, operational, important, critical, mission-critical
+
+
+ ERP Logs:
+ id: erp-logs
+ description: Logs generated by the ERP system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: many # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: archive # values: archive, operational, important, critical, mission-critical
+ availability: archive # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard
+ transactional logs.
+
+
+ ERP Customizing Data:
+ id: erp-customizing
+ description: Data for customizing of the ERP system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the ERP system.
+
+
+ Database Customizing and Dumps:
+ id: db-dumps
+ description: Data for customizing of the DB system, which might include full database dumps.
+ usage: devops # values: business, devops
+ tags:
+ - oracle
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the DB system, which might include full database dumps.
+
+
+
+
+
+
+technical_assets:
+
+
+ Customer Web Client:
+ id: customer-client
+ description: Customer Web Client
+ type: external-entity # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by enduser customer
+ size: component # values: system, service, application, component
+ technology: browser # values: see help
+ tags:
+ internet: true
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Customer
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by the customer to access the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Customer Traffic:
+ target: load-balancer
+ description: Link to the load balancer
+ protocol: https # values: see help
+ authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ - marketing-material
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Backoffice Client:
+ id: backoffice-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Backoffice client
+ type: external-entity # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by Company XYZ company
+ size: component # values: system, service, application, component
+ technology: desktop # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company XYZ
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by Company XYZ to administer and use the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-contracts
+ - internal-business-data
+ - erp-logs
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ ERP Internal Access:
+ target: erp-system
+ description: Link to the ERP system
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ - some-erp
+ vpn: true
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-contracts
+ - internal-business-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Marketing CMS Editing:
+ target: marketing-cms
+ description: Link to the CMS for editing content
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: true
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - marketing-material
+ data_assets_received: # sequence of IDs to reference
+ - marketing-material
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Backend Admin Client:
+ id: backend-admin-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Backend admin client
+ type: external-entity # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by ops provider
+ size: component # values: system, service, application, component
+ technology: browser # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company XYZ
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by Company XYZ to administer the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - erp-logs
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ ERP Web Access:
+ target: erp-system
+ description: Link to the ERP system (Web)
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - erp-customizing
+ data_assets_received: # sequence of IDs to reference
+ - erp-logs
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ DB Update Access:
+ target: sql-database
+ description: Link to the database (JDBC tunneled via SSH)
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - db-dumps
+ data_assets_received: # sequence of IDs to reference
+ - db-dumps
+ - erp-logs
+ - customer-accounts
+ - customer-operational-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ User Management Access:
+ target: ldap-auth-server
+ description: Link to the LDAP auth server for managing users
+ protocol: ldaps # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Load Balancer:
+ id: load-balancer
+ #diagram_tweak_order: 50 # affects left to right positioning (only within a trust boundary)
+ description: Load Balancer (HA-Proxy)
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: load-balancer # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ
+ usages of the portal and ERP system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - client-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Web Application Traffic:
+ target: apache-webserver
+ description: Link to the web server
+ protocol: http # values: see help
+ authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+ CMS Content Traffic:
+ target: marketing-cms
+ description: Link to the CMS server
+ protocol: http # values: see help
+ authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: none # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ data_assets_received: # sequence of IDs to reference
+ - marketing-material
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+
+
+ Apache Webserver:
+ id: apache-webserver
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Apache Webserver hosting the API code and client-side code
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: application # values: system, service, application, component
+ technology: web-server # values: see help
+ tags:
+ - linux
+ - apache
+ - aws:ec2
+ internet: false
+ machine: container # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: true
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - json
+ - file
+ communication_links:
+ ERP System Traffic:
+ target: erp-system
+ description: Link to the ERP system
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+ Auth Credential Check Traffic:
+ target: identity-provider
+ description: Link to the identity provider server
+ protocol: https # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+
+
+ Identity Provider:
+ id: identity-provider
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Identity provider server
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: identity-provider # values: see help
+ tags:
+ - linux
+ - jboss
+ - keycloak
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The auth data of the application
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ LDAP Credential Check Traffic:
+ target: ldap-auth-server
+ description: Link to the LDAP server
+ protocol: ldaps # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+
+
+ LDAP Auth Server:
+ id: ldap-auth-server
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: LDAP authentication server
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: identity-store-ldap # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: transparent # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The auth data of the application
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ - customer-accounts
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+
+
+ Marketing CMS:
+ id: marketing-cms
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: CMS for the marketing content
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: application # values: system, service, application, component
+ technology: cms # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: container # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: true
+ data_assets_processed: # sequence of IDs to reference
+ - marketing-material
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ - marketing-material
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Auth Traffic:
+ target: ldap-auth-server
+ description: Link to the LDAP auth server
+ protocol: ldap # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+
+
+ Backoffice ERP System:
+ id: erp-system
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: ERP system
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: erp # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other
+ Company XYZ internal processes.
+ multi_tenant: false
+ redundant: true
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - erp-customizing
+ data_assets_stored: # sequence of IDs to reference
+ - erp-logs
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - xml
+ - file
+ - serialization
+ communication_links:
+ Database Traffic:
+ target: sql-database
+ description: Link to the DB system
+ protocol: jdbc # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ NFS Filesystem Access:
+ target: contract-fileserver
+ description: Link to the file system
+ protocol: nfs # values: see help
+ authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: none # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-contracts
+ data_assets_received: # sequence of IDs to reference
+ - customer-contracts
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Contract Fileserver:
+ id: contract-fileserver
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: NFS Filesystem for storing the contract PDFs
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: file-server # values: see help
+ tags:
+ - linux
+ - aws:s3
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Contract data might contain financial data as well as personally identifiable information (PII). The integrity and
+ availability of contract data is required for clearing payment disputes. The filesystem is also required to be available
+ for storing new contracts of freshly generated customers.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ data_assets_stored: # sequence of IDs to reference
+ - customer-contracts
+ - contract-summaries
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
+
+
+ Customer Contract Database:
+ id: sql-database
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: The database behind the ERP system
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: database # values: see help
+ tags:
+ - linux
+ - mysql
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: data-with-symmetric-shared-key # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also
+ for other Company XYZ internal processes.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - db-dumps
+ data_assets_stored: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+
+
+ External Development Client:
+ id: external-dev-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: External developer client
+ type: external-entity # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by external developers
+ size: system # values: system, service, application, component
+ technology: devops-client # values: see help
+ tags:
+ - linux
+ internet: true
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: External Developers
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The clients used by external developers to create parts of the application code.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
+ Git-Repo Code Write Access:
+ target: git-repo
+ description: Link to the Git repo
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Git-Repo Web-UI Access:
+ target: git-repo
+ description: Link to the Git repo
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Jenkins Web-UI Access:
+ target: jenkins-buildserver
+ description: Link to the Jenkins build server
+ protocol: https # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - build-job-config
+ data_assets_received: # sequence of IDs to reference
+ - build-job-config
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Git Repository:
+ id: git-repo
+ #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary)
+ description: Git repository server
+ type: process # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: sourcecode-repository # values: see help
+ tags:
+ - linux
+ - git
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is
+ therefore rated as confidential.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
+
+
+ Jenkins Buildserver:
+ id: jenkins-buildserver
+ #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary)
+ description: Jenkins buildserver
+ type: process # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: build-pipeline # values: see help
+ tags:
+ - linux
+ - jenkins
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is
+ therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk
+ of reputation damage and application update unavailability when the build pipeline is compromised.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - build-job-config
+ - client-application-code
+ - server-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ - build-job-config
+ - client-application-code
+ - server-application-code
+ - marketing-material
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ - serialization
+ communication_links:
+ Git Repo Code Read Access:
+ target: git-repo
+ description: Link to the Git repository server
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Application Deployment:
+ target: apache-webserver
+ description: Link to the Apache webserver
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ CMS Updates:
+ target: marketing-cms
+ description: Link to the CMS
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - marketing-material
+ data_assets_received: # sequence of IDs to reference
+
+
+
+
+
+trust_boundaries:
+
+
+ Web DMZ:
+ id: web-dmz
+ description: Web DMZ
+ type: network-cloud-security-group # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - apache-webserver
+ - marketing-cms
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ ERP DMZ:
+ id: erp-dmz
+ description: ERP DMZ
+ type: network-cloud-security-group # values: see help
+ tags:
+ - some-erp
+ technical_assets_inside: # sequence of IDs to reference
+ - erp-system
+ - contract-fileserver
+ - sql-database
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ Application Network:
+ id: application-network
+ description: Application Network
+ type: network-cloud-provider # values: see help
+ tags:
+ - aws
+ technical_assets_inside: # sequence of IDs to reference
+ - load-balancer
+ trust_boundaries_nested: # sequence of IDs to reference
+ - web-dmz
+ - erp-dmz
+ - auth-env
+
+
+ Auth Handling Environment:
+ id: auth-env
+ description: Auth Handling Environment
+ type: execution-environment # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - identity-provider
+ - ldap-auth-server
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ Dev Network:
+ id: dev-network
+ description: Development Network
+ type: network-on-prem # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - jenkins-buildserver
+ - git-repo
+ - backend-admin-client
+ - backoffice-client
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+
+
+
+shared_runtimes:
+
+
+ WebApp and Backoffice Virtualization:
+ id: webapp-virtualization
+ description: WebApp Virtualization
+ tags:
+ - vmware
+ technical_assets_running: # sequence of IDs to reference
+ - apache-webserver
+ - marketing-cms
+ - erp-system
+ - contract-fileserver
+ - sql-database
+
+
+
+
+individual_risk_categories: # used for adding custom manually identified risks
+
+
+ Some Individual Risk Example:
+ id: something-strange
+ description: Some text describing the risk category...
+ impact: Some text describing the impact...
+ asvs: V0 - Something Strange
+ cheat_sheet: https://example.com
+ action: Some text describing the action...
+ mitigation: Some text describing the mitigation...
+ check: Check if XYZ...
+ function: business-side # values: business-side, architecture, development, operations
+ stride: repudiation # values: spoofing, tampering, repudiation, information-disclosure, denial-of-service, elevation-of-privilege
+ detection_logic: Some text describing the detection logic...
+ risk_assessment: Some text describing the risk assessment...
+ false_positives: Some text describing the most common types of false positives...
+ model_failure_possible_reason: false
+ cwe: 693
+ risks_identified:
+ Example Individual Risk at Database:
+ severity: critical # values: low, medium, elevated, high, critical
+ exploitation_likelihood: likely # values: unlikely, likely, very-likely, frequent
+ exploitation_impact: medium # values: low, medium, high, very-high
+ data_breach_probability: probable # values: improbable, possible, probable
+ data_breach_technical_assets: # list of technical asset IDs which might have data breach
+ - sql-database
+ most_relevant_data_asset:
+ most_relevant_technical_asset: sql-database
+ most_relevant_communication_link:
+ most_relevant_trust_boundary:
+ most_relevant_shared_runtime:
+ Example Individual Risk at Contract Filesystem:
+ severity: medium # values: low, medium, elevated, high, critical
+ exploitation_likelihood: frequent # values: unlikely, likely, very-likely, frequent
+ exploitation_impact: very-high # values: low, medium, high, very-high
+ data_breach_probability: improbable # values: improbable, possible, probable
+ data_breach_technical_assets: # list of technical asset IDs which might have data breach
+ most_relevant_data_asset:
+ most_relevant_technical_asset: contract-fileserver
+ most_relevant_communication_link:
+ most_relevant_trust_boundary:
+ most_relevant_shared_runtime:
+
+
+
+# NOTE:
+# For risk tracking each risk-id needs to be defined (the string with the @ sign in it). These unique risk IDs
+# are visible in the PDF report (the small grey string under each risk), the Excel (column "ID"), as well as the JSON responses.
+# Some risk IDs have only one @ sign in them, while others have multiple. The idea is to allow for unique but still speaking IDs.
+# Therefore each risk instance creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part.
+# Using wildcards (the * sign) for parts delimited by @ signs allows handling groups of certain risks at once. It is best to look up the IDs
+# to use in the created Excel file. Alternatively a model macro "seed-risk-tracking" is available that helps in initially
+# seeding the risk tracking part here based on already identified and not yet handled risks.
+risk_tracking:
+
+ untrusted-deserialization@erp-system: # wildcards "*" between the @ characters are possible
+ status: accepted # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: Risk accepted as tolerable
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ ldap-injection@*@ldap-auth-server@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-5678
+ date: 2020-01-05
+ checked_by: John Doe
+
+ unencrypted-asset@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ missing-authentication-second-factor@*@*@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ missing-hardening@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ dos-risky-access-across-trust-boundary@*@*@*: # wildcards "*" between the @ characters are possible
+ status: in-progress # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures are being implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+
+
+#diagram_tweak_edge_layout: spline # values: spline, polyline, false, ortho (this suppresses edge labels), curved (this suppresses edge labels and can cause problems with edges)
+
+#diagram_tweak_suppress_edge_labels: true
+#diagram_tweak_layout_left_to_right: true
+#diagram_tweak_nodesep: 2
+#diagram_tweak_ranksep: 2
+#diagram_tweak_invisible_connections_between_assets:
+# - tech-asset-source-id-A:tech-asset-target-id-B
+# - tech-asset-source-id-C:tech-asset-target-id-D
+#diagram_tweak_same_rank_assets:
+# - tech-asset-source-id-E:tech-asset-target-id-F:tech-asset-source-id-G:tech-asset-target-id-H
+# - tech-asset-source-id-M:tech-asset-target-id-N:tech-asset-source-id-O
diff --git a/test/data_assets.yaml b/test/data_assets.yaml
new file mode 100644
index 00000000..43bc0c7d
--- /dev/null
+++ b/test/data_assets.yaml
@@ -0,0 +1,164 @@
+
+data_assets:
+
+
+ Customer Contracts: &customer-contracts # this example shows the inheritance-like features of YAML
+ id: customer-contracts
+ description: Customer Contracts (PDF)
+ usage: business # values: business, devops
+ tags:
+ origin: Customer
+ owner: Company XYZ
+ quantity: many # values: very-few, few, many, very-many
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Contract data might contain financial data as well as personally identifiable information (PII). The integrity and
+ availability of contract data is required for clearing payment disputes.
+
+
+ Customer Contract Summaries:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: contract-summaries
+ description: Customer Contract Summaries
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Just some summaries.
+
+
+ Customer Operational Data:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: customer-operational-data
+ description: Customer Operational Data
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Customer operational data for using the portal are required to be available to offer the portal functionality
+ and are used in the backend transactions.
+
+
+ Customer Accounts:
+ <<: *customer-contracts # here we're referencing the above created asset as base and just overwrite few values
+ id: customer-accounts
+ description: Customer Accounts (including transient credentials when entered for checking them)
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Customer account data for using the portal are required to be available to offer the portal functionality.
+
+
+ Some Internal Business Data:
+ id: internal-business-data
+ description: Internal business data of the ERP system used unrelated to the customer-facing processes.
+ usage: business # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: few # values: very-few, few, many, very-many
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data used and/or generated during unrelated other usecases of the ERP-system (when used also by Company XYZ for
+ internal non-customer-portal-related stuff).
+
+
+ Client Application Code: &client-application-code # this example shows the inheritance-like features of YAML
+ id: client-application-code
+ description: Angular and other client-side code delivered by the application.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company ABC
+ owner: Company ABC
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: public # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The integrity of the public data is critical to avoid reputational damage and the availability is important on the
+ long-term scale (but not critical) to keep the growth rate of the customer base steady.
+
+
+ Server Application Code:
+ <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values
+ id: server-application-code
+ description: API and other server-side code of the application.
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The integrity of the API code is critical to avoid reputational damage and the availability is important on the
+ long-term scale (but not critical) to keep the growth rate of the customer base steady.
+
+
+ Build Job Config:
+ id: build-job-config
+ description: Data for customizing of the build job system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the build job system.
+
+
+ Marketing Material:
+ <<: *client-application-code # here we're referencing the above created asset as base and just overwrite few values
+ id: marketing-material
+ description: Website and marketing data to inform potential customers and generate new leads.
+ integrity: important # values: archive, operational, important, critical, mission-critical
+
+
+ ERP Logs:
+ id: erp-logs
+ description: Logs generated by the ERP system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: many # values: very-few, few, many, very-many
+ confidentiality: restricted # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: archive # values: archive, operational, important, critical, mission-critical
+ availability: archive # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Logs should not contain PII data and are only required for failure analysis, i.e. they are not considered as hard
+ transactional logs.
+
+
+ ERP Customizing Data:
+ id: erp-customizing
+ description: Data for customizing of the ERP system.
+ usage: devops # values: business, devops
+ tags:
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the ERP system.
+
+
+ Database Customizing and Dumps:
+ id: db-dumps
+ description: Data for customizing of the DB system, which might include full database dumps.
+ usage: devops # values: business, devops
+ tags:
+ - oracle
+ origin: Company XYZ
+ owner: Company XYZ
+ quantity: very-few # values: very-few, few, many, very-many
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Data for customizing of the DB system, which might include full database dumps.
+
diff --git a/test/diagram_tweak.yaml b/test/diagram_tweak.yaml
new file mode 100644
index 00000000..eff09615
--- /dev/null
+++ b/test/diagram_tweak.yaml
@@ -0,0 +1,13 @@
+
+#diagram_tweak_edge_layout: spline # values: spline, polyline, false, ortho (this suppresses edge labels), curved (this suppresses edge labels and can cause problems with edges)
+
+#diagram_tweak_suppress_edge_labels: true
+#diagram_tweak_layout_left_to_right: true
+#diagram_tweak_nodesep: 2
+#diagram_tweak_ranksep: 2
+#diagram_tweak_invisible_connections_between_assets:
+# - tech-asset-source-id-A:tech-asset-target-id-B
+# - tech-asset-source-id-C:tech-asset-target-id-D
+#diagram_tweak_same_rank_assets:
+# - tech-asset-source-id-E:tech-asset-target-id-F:tech-asset-source-id-G:tech-asset-target-id-H
+# - tech-asset-source-id-M:tech-asset-target-id-N:tech-asset-source-id-O
diff --git a/test/main.yaml b/test/main.yaml
new file mode 100644
index 00000000..a90256a3
--- /dev/null
+++ b/test/main.yaml
@@ -0,0 +1,27 @@
+threagile_version: 1.0.0
+
+# NOTE:
+#
+# For a perfect editing experience within your IDE of choice you can easily
+# get model syntax validation and autocompletion (very handy for enum values)
+# as well as live templates: Just import the schema.json into your IDE and assign
+# it as "schema" to each Threagile YAML file. Also try to import individual parts
+# from the live-templates.txt file into your IDE as live editing templates.
+#
+# You might also want to try the REST API when running in server mode...
+
+includes:
+ - meta.yaml
+ - overview.yaml
+ - questions.yaml
+ - abuse_cases.yaml
+ - security_requirements.yaml
+ - tags.yaml
+ - data_assets.yaml
+ - technical_assets.yaml
+ - trust_boundaries.yaml
+ - shared_runtimes.yaml
+ - risk_categories.yaml
+ - risk_tracking.yaml
+ - diagram_tweak.yaml
+
diff --git a/test/meta.yaml b/test/meta.yaml
new file mode 100644
index 00000000..c368ce1a
--- /dev/null
+++ b/test/meta.yaml
@@ -0,0 +1,9 @@
+title: Some Example Application
+
+date: 2020-07-01
+
+author:
+ name: John Doe
+ homepage: www.example.com
+
+business_criticality: important # values: archive, operational, important, critical, mission-critical
diff --git a/test/overview.yaml b/test/overview.yaml
new file mode 100644
index 00000000..52b83ac7
--- /dev/null
+++ b/test/overview.yaml
@@ -0,0 +1,16 @@
+management_summary_comment: >
+ Just some more custom summary possible here...
+
+
+business_overview:
+ description: Some more demo text here and even images...
+ images:
+# - custom-image-1.png: Some dummy image 1
+# - custom-image-2.png: Some dummy image 2
+
+
+technical_overview:
+ description: Some more demo text here and even images...
+ images:
+# - custom-image-1.png: Some dummy image 1
+# - custom-image-2.png: Some dummy image 2
diff --git a/test/questions.yaml b/test/questions.yaml
new file mode 100644
index 00000000..4f8fc38a
--- /dev/null
+++ b/test/questions.yaml
@@ -0,0 +1,6 @@
+questions: # simply use "" as answer to signal "unanswered"
+ How are the admin clients managed/protected against compromise?: ""
+ How are the development clients managed/protected against compromise?: >
+ Managed by XYZ
+ How are the build pipeline components managed/protected against compromise?: >
+ Managed by XYZ
diff --git a/test/risk_categories.yaml b/test/risk_categories.yaml
new file mode 100644
index 00000000..e63c87e8
--- /dev/null
+++ b/test/risk_categories.yaml
@@ -0,0 +1,44 @@
+
+individual_risk_categories: # used for adding custom manually identified risks
+
+ Some Individual Risk Example:
+ id: something-strange
+ description: Some text describing the risk category...
+ impact: Some text describing the impact...
+ asvs: V0 - Something Strange
+ cheat_sheet: https://example.com
+ action: Some text describing the action...
+ mitigation: Some text describing the mitigation...
+ check: Check if XYZ...
+ function: business-side # values: business-side, architecture, development, operations
+ stride: repudiation # values: spoofing, tampering, repudiation, information-disclosure, denial-of-service, elevation-of-privilege
+ detection_logic: Some text describing the detection logic...
+ risk_assessment: Some text describing the risk assessment...
+ false_positives: Some text describing the most common types of false positives...
+ model_failure_possible_reason: false
+ cwe: 693
+ risks_identified:
+ Example Individual Risk at Database:
+ severity: critical # values: low, medium, elevated, high, critical
+ exploitation_likelihood: likely # values: unlikely, likely, very-likely, frequent
+ exploitation_impact: medium # values: low, medium, high, very-high
+ data_breach_probability: probable # values: improbable, possible, probable
+ data_breach_technical_assets: # list of technical asset IDs which might have data breach
+ - sql-database
+ most_relevant_data_asset:
+ most_relevant_technical_asset: sql-database
+ most_relevant_communication_link:
+ most_relevant_trust_boundary:
+ most_relevant_shared_runtime:
+ Example Individual Risk at Contract Filesystem:
+ severity: medium # values: low, medium, elevated, high, critical
+ exploitation_likelihood: frequent # values: unlikely, likely, very-likely, frequent
+ exploitation_impact: very-high # values: low, medium, high, very-high
+ data_breach_probability: improbable # values: improbable, possible, probable
+ data_breach_technical_assets: # list of technical asset IDs which might have data breach
+ most_relevant_data_asset:
+ most_relevant_technical_asset: contract-fileserver
+ most_relevant_communication_link:
+ most_relevant_trust_boundary:
+ most_relevant_shared_runtime:
+
diff --git a/test/risk_tracking.yaml b/test/risk_tracking.yaml
new file mode 100644
index 00000000..30c84a11
--- /dev/null
+++ b/test/risk_tracking.yaml
@@ -0,0 +1,52 @@
+
+# NOTE:
+# For risk tracking each risk-id needs to be defined (the string with the @ sign in it). These unique risk IDs
+# are visible in the PDF report (the small grey string under each risk), the Excel (column "ID"), as well as the JSON responses.
+# Some risk IDs have only one @ sign in them, while others have multiple. The idea is to allow for unique but still speaking IDs.
+# Therefore each risk instance creates its individual ID by taking all affected elements causing the risk to be within an @-delimited part.
+# Using wildcards (the * sign) for parts delimited by @ signs allows handling groups of certain risks at once. It is best to look up the IDs
+# to use in the created Excel file. Alternatively a model macro "seed-risk-tracking" is available that helps in initially
+# seeding the risk tracking part here based on already identified and not yet handled risks.
+risk_tracking:
+
+ untrusted-deserialization@erp-system: # wildcards "*" between the @ characters are possible
+ status: accepted # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: Risk accepted as tolerable
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ ldap-injection@*@ldap-auth-server@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-5678
+ date: 2020-01-05
+ checked_by: John Doe
+
+ unencrypted-asset@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ missing-authentication-second-factor@*@*@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ missing-hardening@*: # wildcards "*" between the @ characters are possible
+ status: mitigated # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures were implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
+
+ dos-risky-access-across-trust-boundary@*@*@*: # wildcards "*" between the @ characters are possible
+ status: in-progress # values: unchecked, in-discussion, accepted, in-progress, mitigated, false-positive
+ justification: The hardening measures are being implemented and checked
+ ticket: XYZ-1234
+ date: 2020-01-04
+ checked_by: John Doe
diff --git a/test/security_requirements.yaml b/test/security_requirements.yaml
new file mode 100644
index 00000000..d8905dfc
--- /dev/null
+++ b/test/security_requirements.yaml
@@ -0,0 +1,4 @@
+security_requirements:
+ Input Validation: Strict input validation is required to reduce the overall attack surface.
+ Securing Administrative Access: Administrative access must be secured with strong encryption and multi-factor authentication.
+ EU-DSGVO: Mandatory EU-Datenschutzgrundverordnung
diff --git a/test/shared_runtimes.yaml b/test/shared_runtimes.yaml
new file mode 100644
index 00000000..c9bee4d6
--- /dev/null
+++ b/test/shared_runtimes.yaml
@@ -0,0 +1,16 @@
+
+shared_runtimes:
+
+
+ WebApp and Backoffice Virtualization:
+ id: webapp-virtualization
+ description: WebApp Virtualization
+ tags:
+ - vmware
+ technical_assets_running: # sequence of IDs to reference
+ - apache-webserver
+ - marketing-cms
+ - erp-system
+ - contract-fileserver
+ - sql-database
+
diff --git a/test/tags.yaml b/test/tags.yaml
new file mode 100644
index 00000000..527412e5
--- /dev/null
+++ b/test/tags.yaml
@@ -0,0 +1,16 @@
+# Tags can be used for anything; they are just tags. Also, risk rules can act based on tags if you like.
+# Tags can be used for example to name the products used (which is more concrete than the technology types that only specify the type)
+tags_available:
+ - linux
+ - apache
+ - mysql
+ - jboss
+ - keycloak
+ - jenkins
+ - git
+ - oracle
+ - some-erp
+ - vmware
+ - aws
+ - aws:ec2
+ - aws:s3
diff --git a/test/technical_assets.yaml b/test/technical_assets.yaml
new file mode 100644
index 00000000..427d2806
--- /dev/null
+++ b/test/technical_assets.yaml
@@ -0,0 +1,6 @@
+includes:
+ - technical_assets_clients.yaml
+ - technical_assets_infrastructure.yaml
+ - technical_assets_servers.yaml
+ - technical_assets_databases.yaml
+ - technical_assets_devops.yaml
diff --git a/test/technical_assets_clients.yaml b/test/technical_assets_clients.yaml
new file mode 100644
index 00000000..d9091f13
--- /dev/null
+++ b/test/technical_assets_clients.yaml
@@ -0,0 +1,211 @@
+
+technical_assets:
+
+ Customer Web Client:
+ id: customer-client
+ description: Customer Web Client
+ type: external-entity # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by enduser customer
+ size: component # values: system, service, application, component
+ technology: browser # values: see help
+ tags:
+ internet: true
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Customer
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by the customer to access the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Customer Traffic:
+ target: load-balancer
+ description: Link to the load balancer
+ protocol: https # values: see help
+ authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ - marketing-material
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Backoffice Client:
+ id: backoffice-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Backoffice client
+ type: external-entity # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by Company XYZ
+ size: component # values: system, service, application, component
+ technology: desktop # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company XYZ
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by Company XYZ to administer and use the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-contracts
+ - internal-business-data
+ - erp-logs
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ ERP Internal Access:
+ target: erp-system
+ description: Link to the ERP system
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ - some-erp
+ vpn: true
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-contracts
+ - internal-business-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Marketing CMS Editing:
+ target: marketing-cms
+ description: Link to the CMS for editing content
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: true
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - marketing-material
+ data_assets_received: # sequence of IDs to reference
+ - marketing-material
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Backend Admin Client:
+ id: backend-admin-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Backend admin client
+ type: external-entity # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by ops provider
+ size: component # values: system, service, application, component
+ technology: browser # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company XYZ
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: operational # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The client used by Company XYZ to administer the system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - erp-logs
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ ERP Web Access:
+ target: erp-system
+ description: Link to the ERP system (Web)
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - erp-customizing
+ data_assets_received: # sequence of IDs to reference
+ - erp-logs
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ DB Update Access:
+ target: sql-database
+ description: Link to the database (JDBC tunneled via SSH)
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - db-dumps
+ data_assets_received: # sequence of IDs to reference
+ - db-dumps
+ - erp-logs
+ - customer-accounts
+ - customer-operational-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ User Management Access:
+ target: ldap-auth-server
+ description: Link to the LDAP auth server for managing users
+ protocol: ldaps # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
diff --git a/test/technical_assets_databases.yaml b/test/technical_assets_databases.yaml
new file mode 100644
index 00000000..54fe362c
--- /dev/null
+++ b/test/technical_assets_databases.yaml
@@ -0,0 +1,71 @@
+
+technical_assets:
+
+ LDAP Auth Server:
+ id: ldap-auth-server
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: LDAP authentication server
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: identity-store-ldap # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: transparent # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The auth data of the application
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ - customer-accounts
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+
+
+ Customer Contract Database:
+ id: sql-database
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: The database behind the ERP system
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: database # values: see help
+ tags:
+ - linux
+ - mysql
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: data-with-symmetric-shared-key # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The ERP system's database contains business-relevant sensitive data for the leasing processes and eventually also
+ for other Company XYZ internal processes.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - db-dumps
+ data_assets_stored: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
diff --git a/test/technical_assets_devops.yaml b/test/technical_assets_devops.yaml
new file mode 100644
index 00000000..a6c0aa49
--- /dev/null
+++ b/test/technical_assets_devops.yaml
@@ -0,0 +1,223 @@
+includes:
+ - technical_assets_clients.yaml
+
+technical_assets:
+
+ External Development Client:
+ id: external-dev-client
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: External developer client
+ type: external-entity # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: true
+ out_of_scope: true
+ justification_out_of_scope: Owned and managed by external developers
+ size: system # values: system, service, application, component
+ technology: devops-client # values: see help
+ tags:
+ - linux
+ internet: true
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: External Developers
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: operational # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The clients used by external developers to create parts of the application code.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
+ Git-Repo Code Write Access:
+ target: git-repo
+ description: Link to the Git repo
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Git-Repo Web-UI Access:
+ target: git-repo
+ description: Link to the Git repo
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Jenkins Web-UI Access:
+ target: jenkins-buildserver
+ description: Link to the Jenkins build server
+ protocol: https # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - build-job-config
+ data_assets_received: # sequence of IDs to reference
+ - build-job-config
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Git Repository:
+ id: git-repo
+ #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary)
+ description: Git repository server
+ type: process # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: sourcecode-repository # values: see help
+ tags:
+ - linux
+ - git
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The code repo pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is
+ therefore rated as confidential.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
+
+
+ Jenkins Buildserver:
+ id: jenkins-buildserver
+ #diagram_tweak_order: 99 # affects left to right positioning (only within a trust boundary)
+ description: Jenkins buildserver
+ type: process # values: external-entity, process, datastore
+ usage: devops # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: build-pipeline # values: see help
+ tags:
+ - linux
+ - jenkins
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The build pipeline might contain sensitive configuration values like backend credentials, certificates etc. and is
+ therefore rated as confidential. The integrity and availability is rated as critical and important due to the risk
+ of reputation damage and application update unavailability when the build pipeline is compromised.
+ multi_tenant: true
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - build-job-config
+ - client-application-code
+ - server-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ - build-job-config
+ - client-application-code
+ - server-application-code
+ - marketing-material
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ - serialization
+ communication_links:
+ Git Repo Code Read Access:
+ target: git-repo
+ description: Link to the Git repository server
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ data_assets_received: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ Application Deployment:
+ target: apache-webserver
+ description: Link to the Apache webserver
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_assets_received: # sequence of IDs to reference
+ CMS Updates:
+ target: marketing-cms
+ description: Link to the CMS
+ protocol: ssh # values: see help
+ authentication: client-certificate # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: devops # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - marketing-material
+ data_assets_received: # sequence of IDs to reference
+
diff --git a/test/technical_assets_infrastructure.yaml b/test/technical_assets_infrastructure.yaml
new file mode 100644
index 00000000..00e7b697
--- /dev/null
+++ b/test/technical_assets_infrastructure.yaml
@@ -0,0 +1,75 @@
+
+technical_assets:
+
+ Load Balancer:
+ id: load-balancer
+ #diagram_tweak_order: 50 # affects left to right positioning (only within a trust boundary)
+ description: Load Balancer (HA-Proxy)
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: load-balancer # values: see help
+ tags:
+ internet: false
+ machine: physical # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The correct configuration and reachability of the load balancer is mandatory for all customer and Company XYZ
+ usages of the portal and ERP system.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - client-application-code
+ - marketing-material
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Web Application Traffic:
+ target: apache-webserver
+ description: Link to the web server
+ protocol: http # values: see help
+ authentication: session-id # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: enduser-identity-propagation # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - client-application-code
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+ CMS Content Traffic:
+ target: marketing-cms
+ description: Link to the CMS server
+ protocol: http # values: see help
+ authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: none # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ data_assets_received: # sequence of IDs to reference
+ - marketing-material
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
diff --git a/test/technical_assets_servers.yaml b/test/technical_assets_servers.yaml
new file mode 100644
index 00000000..b8e025b3
--- /dev/null
+++ b/test/technical_assets_servers.yaml
@@ -0,0 +1,295 @@
+
+technical_assets:
+
+ Apache Webserver:
+ id: apache-webserver
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Apache Webserver hosting the API code and client-side code
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: application # values: system, service, application, component
+ technology: web-server # values: see help
+ tags:
+ - linux
+ - apache
+ - aws:ec2
+ internet: false
+ machine: container # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The correct configuration and reachability of the web server is mandatory for all customer usages of the portal.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: true
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - client-application-code
+ - server-application-code
+ data_assets_stored: # sequence of IDs to reference
+ - client-application-code
+ - server-application-code
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - json
+ - file
+ communication_links:
+ ERP System Traffic:
+ target: erp-system
+ description: Link to the ERP system
+ protocol: https # values: see help
+ authentication: token # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+ Auth Credential Check Traffic:
+ target: identity-provider
+ description: Link to the identity provider server
+ protocol: https # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+
+
+ Identity Provider:
+ id: identity-provider
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: Identity provider server
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: identity-provider # values: see help
+ tags:
+ - linux
+ - jboss
+ - keycloak
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+      The identity provider manages the authentication data of the application.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ LDAP Credential Check Traffic:
+ target: ldap-auth-server
+ description: Link to the LDAP server
+ protocol: ldaps # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+
+
+ Marketing CMS:
+ id: marketing-cms
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: CMS for the marketing content
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: application # values: system, service, application, component
+ technology: cms # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: container # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: internal # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: important # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+      The correct configuration and reachability of the CMS server is mandatory for all customer usages of the portal.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: true
+ data_assets_processed: # sequence of IDs to reference
+ - marketing-material
+ - customer-accounts
+ data_assets_stored: # sequence of IDs to reference
+ - marketing-material
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ communication_links:
+ Auth Traffic:
+ target: ldap-auth-server
+ description: Link to the LDAP auth server
+ protocol: ldap # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: true
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ #diagram_tweak_weight: 5
+ #diagram_tweak_constraint: false
+
+
+ Backoffice ERP System:
+ id: erp-system
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: ERP system
+ type: process # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: system # values: system, service, application, component
+ technology: erp # values: see help
+ tags:
+ - linux
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: strictly-confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: mission-critical # values: archive, operational, important, critical, mission-critical
+ availability: mission-critical # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ The ERP system contains business-relevant sensitive data for the leasing processes and eventually also for other
+ Company XYZ internal processes.
+ multi_tenant: false
+ redundant: true
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - customer-contracts
+ - internal-business-data
+ - erp-customizing
+ data_assets_stored: # sequence of IDs to reference
+ - erp-logs
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - xml
+ - file
+ - serialization
+ communication_links:
+ Database Traffic:
+ target: sql-database
+ description: Link to the DB system
+ protocol: jdbc # values: see help
+ authentication: credentials # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: technical-user # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ data_assets_received: # sequence of IDs to reference
+ - customer-accounts
+ - customer-operational-data
+ - internal-business-data
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+ NFS Filesystem Access:
+ target: contract-fileserver
+ description: Link to the file system
+ protocol: nfs # values: see help
+ authentication: none # values: none, credentials, session-id, token, client-certificate, two-factor
+ authorization: none # values: none, technical-user, enduser-identity-propagation
+ tags:
+ vpn: false
+ ip_filtered: false
+ readonly: false
+ usage: business # values: business, devops
+ data_assets_sent: # sequence of IDs to reference
+ - customer-contracts
+ data_assets_received: # sequence of IDs to reference
+ - customer-contracts
+ #diagram_tweak_weight: 1
+ #diagram_tweak_constraint: false
+
+
+ Contract Fileserver:
+ id: contract-fileserver
+ #diagram_tweak_order: 0 # affects left to right positioning (only within a trust boundary)
+ description: NFS Filesystem for storing the contract PDFs
+ type: datastore # values: external-entity, process, datastore
+ usage: business # values: business, devops
+ used_as_client_by_human: false
+ out_of_scope: false
+ justification_out_of_scope:
+ size: component # values: system, service, application, component
+ technology: file-server # values: see help
+ tags:
+ - linux
+ - aws:s3
+ internet: false
+ machine: virtual # values: physical, virtual, container, serverless
+ encryption: none # values: none, transparent, data-with-symmetric-shared-key, data-with-asymmetric-shared-key, data-with-enduser-individual-key
+ owner: Company ABC
+ confidentiality: confidential # values: public, internal, restricted, confidential, strictly-confidential
+ integrity: critical # values: archive, operational, important, critical, mission-critical
+ availability: important # values: archive, operational, important, critical, mission-critical
+ justification_cia_rating: >
+ Contract data might contain financial data as well as personally identifiable information (PII). The integrity and
+      availability of contract data are required for clearing payment disputes. The filesystem is also required to be available
+ for storing new contracts of freshly generated customers.
+ multi_tenant: false
+ redundant: false
+ custom_developed_parts: false
+ data_assets_processed: # sequence of IDs to reference
+ data_assets_stored: # sequence of IDs to reference
+ - customer-contracts
+ - contract-summaries
+ data_formats_accepted: # sequence of formats like: json, xml, serialization, file, csv
+ - file
+ communication_links:
diff --git a/test/trust_boundaries.yaml b/test/trust_boundaries.yaml
new file mode 100644
index 00000000..b6ae70fd
--- /dev/null
+++ b/test/trust_boundaries.yaml
@@ -0,0 +1,67 @@
+
+trust_boundaries:
+
+
+ Web DMZ:
+ id: web-dmz
+ description: Web DMZ
+ type: network-cloud-security-group # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - apache-webserver
+ - marketing-cms
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ ERP DMZ:
+ id: erp-dmz
+ description: ERP DMZ
+ type: network-cloud-security-group # values: see help
+ tags:
+ - some-erp
+ technical_assets_inside: # sequence of IDs to reference
+ - erp-system
+ - contract-fileserver
+ - sql-database
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ Application Network:
+ id: application-network
+ description: Application Network
+ type: network-cloud-provider # values: see help
+ tags:
+ - aws
+ technical_assets_inside: # sequence of IDs to reference
+ - load-balancer
+ trust_boundaries_nested: # sequence of IDs to reference
+ - web-dmz
+ - erp-dmz
+ - auth-env
+
+
+ Auth Handling Environment:
+ id: auth-env
+ description: Auth Handling Environment
+ type: execution-environment # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - identity-provider
+ - ldap-auth-server
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+ Dev Network:
+ id: dev-network
+ description: Development Network
+ type: network-on-prem # values: see help
+ tags:
+ technical_assets_inside: # sequence of IDs to reference
+ - jenkins-buildserver
+ - git-repo
+ - backend-admin-client
+ - backoffice-client
+ trust_boundaries_nested: # sequence of IDs to reference
+
+
+