Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding ipv6 support for bgp router peer, router interface and router. #10204

Closed
wants to merge 14 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .ci/gcb-push-downstream.yml
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ steps:
entrypoint: '/workspace/.ci/scripts/go-plus/vcr-cassette-merger/vcr_merge.sh'
secretEnv: ["GITHUB_TOKEN_CLASSIC", "GOOGLE_PROJECT"]
id: vcr-merge
waitFor: ["tpg-push"]
waitFor: ["tpg-push", "tpgb-push", "tgc-push", "tf-oics-push"]
env:
- BASE_BRANCH=$BRANCH_NAME
args:
Expand Down
10 changes: 6 additions & 4 deletions .ci/magician/cmd/request_reviewer.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,10 +83,12 @@ func execRequestReviewer(prNumber string, gh GithubClient) {

reviewersToRequest, newPrimaryReviewer := github.ChooseCoreReviewers(requestedReviewers, previousReviewers)

err = gh.RequestPullRequestReviewers(prNumber, reviewersToRequest)
if err != nil {
fmt.Println(err)
os.Exit(1)
if len(reviewersToRequest) > 0 {
err = gh.RequestPullRequestReviewers(prNumber, reviewersToRequest)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}

if newPrimaryReviewer != "" {
Expand Down
3 changes: 3 additions & 0 deletions .ci/magician/cmd/request_reviewer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,9 @@ func TestExecRequestReviewer(t *testing.T) {

if tc.expectSpecificReviewers != nil {
assert.ElementsMatch(t, tc.expectSpecificReviewers, actualReviewers)
if len(tc.expectSpecificReviewers) == 0 {
assert.Len(t, gh.calledMethods["RequestPullRequestReviewers"], 0)
}
}
if tc.expectReviewersFromList != nil {
for _, reviewer := range actualReviewers {
Expand Down
10 changes: 10 additions & 0 deletions mmv1/products/compute/Router.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,16 @@ properties:
between the two peers. If set, this value must be between 20 and 60.
The default is 20.
default_value: 20
- !ruby/object:Api::Type::String
name: identifierRange
default_from_api: true
min_version: beta
description: |
Explicitly specifies a range of valid BGP Identifiers for this Router.
It is provided as a link-local IPv4 range (from 169.254.0.0/16), of
size at least /30, even if the BGP sessions are over IPv6. It must
not overlap with any IPv4 BGP session ranges. Other vendors commonly
call this router ID.
- !ruby/object:Api::Type::Boolean
name: encryptedInterconnectRouter
immutable: true
Expand Down
11 changes: 11 additions & 0 deletions mmv1/products/compute/RouterNat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,17 @@ properties:
- :ERRORS_ONLY
- :TRANSLATIONS_ONLY
- :ALL
- !ruby/object:Api::Type::Array
name: 'endpointTypes'
immutable: true
min_size: 1
description: |
Specifies the endpoint Types supported by the NAT Gateway.
Supported values include:
`ENDPOINT_TYPE_VM`, `ENDPOINT_TYPE_SWG`,
`ENDPOINT_TYPE_MANAGED_PROXY_LB`.
default_from_api: true
item_type: Api::Type::String
- !ruby/object:Api::Type::Array
name: rules
description: 'A list of rules associated with this NAT.'
Expand Down
2 changes: 2 additions & 0 deletions mmv1/products/datastore/Index.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@ examples:
- !ruby/object:Provider::Terraform::Examples
name: 'datastore_index'
primary_resource_id: 'default'
test_env_vars:
project_id: :PROJECT_NAME
vars:
property_name_1: 'property_a'
property_name_2: 'property_b'
Expand Down
15 changes: 15 additions & 0 deletions mmv1/templates/terraform/examples/datastore_index.tf.erb
Original file line number Diff line number Diff line change
@@ -1,3 +1,16 @@
# Datastore-mode indexes can only be created once the project's single
# "(default)" Firestore database exists, so the test provisions it first.
resource "google_firestore_database" "database" {
project = "<%= ctx[:test_env_vars]['project_id'] %>"
# google_datastore_index resources only support the (default) database.
# However, google_firestore_index can express any Datastore Mode index
# and should be preferred in all cases.
name = "(default)"
location_id = "nam5"
type = "DATASTORE_MODE"

# Protection is disabled so the test teardown can delete the database.
delete_protection_state = "DELETE_PROTECTION_DISABLED"
deletion_policy = "DELETE"
}

resource "google_datastore_index" "<%= ctx[:primary_resource_id] %>" {
kind = "foo"
properties {
Expand All @@ -8,4 +21,6 @@ resource "google_datastore_index" "<%= ctx[:primary_resource_id] %>" {
name = "<%= ctx[:vars]['property_name_2'] %>"
direction = "ASCENDING"
}

depends_on = [google_firestore_database.database]
}
Original file line number Diff line number Diff line change
Expand Up @@ -256,19 +256,17 @@ func bigQueryTableNormalizePolicyTags(val interface{}) interface{} {

// Compares two existing schema implementations and decides if
// it is changeable.. pairs with a force new on not changeable
func resourceBigQueryTableSchemaIsChangeable(old, new interface{}) (bool, error) {
func resourceBigQueryTableSchemaIsChangeable(old, new interface{}, isExternalTable bool, topLevel bool) (bool, error) {
switch old.(type) {
case []interface{}:
arrayOld := old.([]interface{})
arrayNew, ok := new.([]interface{})
sameNameColumns := 0
droppedColumns := 0
if !ok {
// if not both arrays not changeable
return false, nil
}
if len(arrayOld) > len(arrayNew) {
// if not growing not changeable
return false, nil
}
if err := bigQueryTablecheckNameExists(arrayOld); err != nil {
return false, err
}
Expand All @@ -289,16 +287,28 @@ func resourceBigQueryTableSchemaIsChangeable(old, new interface{}) (bool, error)
}
}
for key := range mapOld {
// all old keys should be represented in the new config
// dropping top level columns can happen in-place
// but this doesn't apply to external tables
if _, ok := mapNew[key]; !ok {
return false, nil
if !topLevel || isExternalTable {
return false, nil
}
droppedColumns += 1
continue
}
if isChangable, err :=
resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key]); err != nil || !isChangable {

isChangable, err := resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key], isExternalTable, false)
if err != nil || !isChangable {
return false, err
} else if isChangable && topLevel {
// top level column that exists in the new schema
sameNameColumns += 1
}
}
return true, nil
// in-place column dropping alongside column additions is not allowed
// as of now because user intention can be ambiguous (e.g. column renaming)
newColumns := len(arrayNew) - sameNameColumns
return (droppedColumns == 0) || (newColumns == 0), nil
case map[string]interface{}:
objectOld := old.(map[string]interface{})
objectNew, ok := new.(map[string]interface{})
Expand Down Expand Up @@ -337,7 +347,7 @@ func resourceBigQueryTableSchemaIsChangeable(old, new interface{}) (bool, error)
return false, nil
}
case "fields":
return resourceBigQueryTableSchemaIsChangeable(valOld, valNew)
return resourceBigQueryTableSchemaIsChangeable(valOld, valNew, isExternalTable, false)

// other parameters: description, policyTags and
// policyTags.names[] are changeable
Expand Down Expand Up @@ -376,7 +386,8 @@ func resourceBigQueryTableSchemaCustomizeDiffFunc(d tpgresource.TerraformResourc
// same as above
log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err)
}
isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new)
_, isExternalTable := d.GetOk("external_data_configuration")
isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new, isExternalTable, true)
if err != nil {
return err
}
Expand Down Expand Up @@ -1710,6 +1721,12 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error {
return nil
}

// TableReference identifies a single BigQuery table by the triple of
// project, dataset, and table IDs, mirroring the API's tableReference
// object. It is passed to helpers (e.g. resourceBigQueryTableColumnDrop)
// so they do not need access to the resource data.
type TableReference struct {
// project is the GCP project ID that owns the dataset.
project string
// datasetID is the BigQuery dataset containing the table.
datasetID string
// tableID is the table's ID within the dataset.
tableID string
}

func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
Expand All @@ -1732,13 +1749,62 @@ func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error
datasetID := d.Get("dataset_id").(string)
tableID := d.Get("table_id").(string)

tableReference := &TableReference{
project: project,
datasetID: datasetID,
tableID: tableID,
}

if err = resourceBigQueryTableColumnDrop(config, userAgent, table, tableReference); err != nil {
return err
}

if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, tableID, table).Do(); err != nil {
return err
}

return resourceBigQueryTableRead(d, meta)
}

// resourceBigQueryTableColumnDrop compares the live table's top-level schema
// fields against the desired schema in table and drops any columns that were
// removed, by running an "ALTER TABLE ... DROP COLUMN ..." DDL query. This
// must run before Tables.Update, which cannot remove columns on its own.
//
// It is a no-op when no columns were removed. It returns any error from
// reading the current table or from submitting the DDL query job.
func resourceBigQueryTableColumnDrop(config *transport_tpg.Config, userAgent string, table *bigquery.Table, tableReference *TableReference) error {
	oldTable, err := config.NewBigQueryClient(userAgent).Tables.Get(tableReference.project, tableReference.datasetID, tableReference.tableID).Do()
	if err != nil {
		return err
	}

	// If either schema is absent there is nothing to diff; guard against a
	// nil-pointer dereference (e.g. schema managed outside this resource).
	if table.Schema == nil || oldTable.Schema == nil {
		return nil
	}

	newTableFields := make(map[string]bool, len(table.Schema.Fields))
	for _, field := range table.Schema.Fields {
		newTableFields[field.Name] = true
	}

	var droppedColumns []string
	for _, field := range oldTable.Schema.Fields {
		if !newTableFields[field.Name] {
			droppedColumns = append(droppedColumns, field.Name)
		}
	}

	if len(droppedColumns) == 0 {
		return nil
	}

	// Joining with ", DROP COLUMN " turns N column names into N DROP COLUMN
	// clauses of a single ALTER TABLE statement.
	droppedColumnsString := strings.Join(droppedColumns, ", DROP COLUMN ")

	dropColumnsDDL := fmt.Sprintf("ALTER TABLE `%s.%s.%s` DROP COLUMN %s", tableReference.project, tableReference.datasetID, tableReference.tableID, droppedColumnsString)
	log.Printf("[INFO] Dropping columns in-place: %s", dropColumnsDDL)

	// DDL statements require standard SQL.
	useLegacySQL := false
	req := &bigquery.QueryRequest{
		Query:        dropColumnsDDL,
		UseLegacySql: &useLegacySQL,
	}

	if _, err := config.NewBigQueryClient(userAgent).Jobs.Query(tableReference.project, req).Do(); err != nil {
		return err
	}

	return nil
}

func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error {
if d.Get("deletion_protection").(bool) {
return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -389,10 +389,11 @@ func TestBigQueryTableSchemaDiffSuppress(t *testing.T) {
}

// testUnitBigQueryDataTableJSONChangeableTestCase describes one
// schema-changeability scenario: a before/after pair of schema JSON
// documents, whether the table is an external table, and the expected
// verdict from resourceBigQueryTableSchemaIsChangeable.
//
// Note: the scraped diff showed both the pre- and post-change field lists
// interleaved; this is the resolved post-change struct.
type testUnitBigQueryDataTableJSONChangeableTestCase struct {
	name            string // case name; also keys nested-variant overrides
	jsonOld         string // JSON-encoded schema before the change
	jsonNew         string // JSON-encoded schema after the change
	isExternalTable bool   // whether external_data_configuration is set
	changeable      bool   // expected changeability result
}

func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testing.T) {
Expand All @@ -403,7 +404,7 @@ func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testin
if err := json.Unmarshal([]byte(testcase.jsonNew), &new); err != nil {
t.Fatalf("unable to unmarshal json - %v", err)
}
changeable, err := resourceBigQueryTableSchemaIsChangeable(old, new)
changeable, err := resourceBigQueryTableSchemaIsChangeable(old, new, testcase.isExternalTable, true)
if err != nil {
t.Errorf("%s failed unexpectedly: %s", testcase.name, err)
}
Expand All @@ -419,6 +420,11 @@ func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testin
d.Before["schema"] = testcase.jsonOld
d.After["schema"] = testcase.jsonNew

if testcase.isExternalTable {
d.Before["external_data_configuration"] = ""
d.After["external_data_configuration"] = ""
}

err = resourceBigQueryTableSchemaCustomizeDiffFunc(d)
if err != nil {
t.Errorf("error on testcase %s - %v", testcase.name, err)
Expand All @@ -428,7 +434,7 @@ func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testin
}
}

var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJSONChangeableTestCase{
var testUnitBigQueryDataTableIsChangeableTestCases = []testUnitBigQueryDataTableJSONChangeableTestCase{
{
name: "defaultEquality",
jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
Expand All @@ -445,7 +451,14 @@ var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJ
name: "arraySizeDecreases",
jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
changeable: false,
changeable: true,
},
{
name: "externalArraySizeDecreases",
jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
isExternalTable: true,
changeable: false,
},
{
name: "descriptionChanges",
Expand Down Expand Up @@ -523,6 +536,24 @@ var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJ
jsonNew: "[{\"name\": \"value3\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"newVal\" }, {\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
changeable: false,
},
{
name: "renameRequiredColumn",
jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]",
jsonNew: "[{\"name\": \"value3\", \"type\" : \"INTEGER\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]",
changeable: false,
},
{
name: "renameNullableColumn",
jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
jsonNew: "[{\"name\": \"value3\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
changeable: false,
},
{
name: "typeModeReqToNullAndColumnDropped",
jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }, {\"name\": \"someValue2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]",
jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]",
changeable: true,
},
{
name: "policyTags",
jsonOld: `[
Expand All @@ -548,15 +579,29 @@ var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJ
},
}

func TestUnitBigQueryDataTable_schemaIsChangable(t *testing.T) {
func TestUnitBigQueryDataTable_schemaIsChangeable(t *testing.T) {
t.Parallel()
for _, testcase := range testUnitBigQueryDataTableIsChangableTestCases {
for _, testcase := range testUnitBigQueryDataTableIsChangeableTestCases {
testcase.check(t)
}
}

func TestUnitBigQueryDataTable_schemaIsChangeableNested(t *testing.T) {
t.Parallel()
// Only top level column drops are changeable
customNestedValues := map[string]bool{"arraySizeDecreases": false, "typeModeReqToNullAndColumnDropped": false}
for _, testcase := range testUnitBigQueryDataTableIsChangeableTestCases {
changeable := testcase.changeable
if overrideValue, ok := customNestedValues[testcase.name]; ok {
changeable = overrideValue
}

testcaseNested := &testUnitBigQueryDataTableJSONChangeableTestCase{
testcase.name + "Nested",
fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"fields\" : %s }]", testcase.jsonOld),
fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INT64\", \"fields\" : %s }]", testcase.jsonNew),
testcase.changeable,
testcase.isExternalTable,
changeable,
}
testcaseNested.check(t)
}
Expand Down
Loading
Loading