diff --git a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
index d9dca564bc4b..c58fc474ebbf 100644
--- a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
+++ b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
@@ -31,6 +31,7 @@
 import com.google.common.collect.ImmutableSet;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.ParWork;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.annotation.SolrThreadSafe;
 import org.apache.solr.common.cloud.DocCollection;
@@ -50,6 +51,7 @@
 import org.apache.solr.servlet.HttpSolrCall;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.servlet.SolrRequestParsers;
+import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -212,9 +214,11 @@ protected DocCollection resolveDocCollection(String collectionStr) {
       // ensure our view is up to date before trying again
       try {
         zkStateReader.aliasesManager.update();
-        zkStateReader.forceUpdateCollection(collectionsList.get(0));
       } catch (Exception e) {
-        log.error("Error trying to update state while resolving collection.", e);
+        ParWork.propegateInterrupt("Error trying to update state while resolving collection.", e);
+        if (e instanceof KeeperException.SessionExpiredException) {
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+        }
         //don't propagate exception on purpose
       }
       return logic.get();
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 8276babe149f..8b61804dbab2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -127,7 +127,6 @@ public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList
     AtomicReference<String> slice = new AtomicReference<>();
     slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
     Set<String> offlineSlices = new HashSet<>();
@@ -680,12 +679,7 @@ private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionN
                                    List<String> subSlices, Set<String> offlineSlices) {
     log.info("Cleaning up after a failed split of {}/{}", collectionName, parentShard);
     // get the latest state
-    try {
-      zkStateReader.forceUpdateCollection(collectionName);
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Cleanup failed after failed split of {}/{} : (force update collection)", collectionName, parentShard, e);
-      return;
-    }
+
     ClusterState clusterState = zkStateReader.getClusterState();
     DocCollection coll = clusterState.getCollectionOrNull(collectionName);
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
index 6c4daa5a8897..655f388c2b13 100644
--- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
@@ -400,11 +400,6 @@ private NamedList getStatus() {
   private void handleCollectionCheckpointAction(SolrQueryRequest req, SolrQueryResponse rsp)
       throws IOException, SolrServerException {
     ZkController zkController = core.getCoreContainer().getZkController();
-    try {
-      zkController.getZkStateReader().forceUpdateCollection(collection);
-    } catch (Exception e) {
-      log.warn("Error when updating cluster state", e);
-    }
     ClusterState cstate = zkController.getClusterState();
     DocCollection docCollection = cstate.getCollectionOrNull(collection);
     Collection shards = docCollection == null? null : docCollection.getActiveSlices();
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
index 81900c32ed3e..547c5507f79a 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
@@ -176,7 +176,6 @@ private DocCollection checkParams() throws KeeperException, InterruptedException
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "The " + COLLECTION_PROP + " is required for the Rebalance Leaders command."));
     }
-    coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
     ClusterState clusterState = coreContainer.getZkController().getClusterState();
     DocCollection dc = clusterState.getCollection(collectionName);
@@ -208,7 +207,6 @@ private void checkLeaderStatus() throws InterruptedException, KeeperException {
         }
       }
       TimeUnit.MILLISECONDS.sleep(100);
-      coreContainer.getZkController().getZkStateReader().forciblyRefreshAllClusterStateSlow();
     }
     addAnyFailures();
   }
@@ -393,7 +391,6 @@ int waitForNodeChange(Slice slice, String electionNode) throws InterruptedExcept
       }
     }
     TimeUnit.MILLISECONDS.sleep(100);
-    zkStateReader.forciblyRefreshAllClusterStateSlow();
   }
   return -1;
 }
diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index db38cf7852c7..01c958936191 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -482,7 +482,6 @@ protected void extractRemotePath(String collectionName, String origCorename) thr
       if (!retry) {
         // we couldn't find a core to work with, try reloading aliases & this collection
         cores.getZkController().getZkStateReader().aliasesManager.update();
-        cores.getZkController().zkStateReader.forceUpdateCollection(collectionName); // TODO: remove
         action = RETRY;
       }
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 7f6522e661f5..d3e7cdf2a56f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -739,7 +739,6 @@ private void testStopAndStartCoresInOneInstance() throws Exception {
     printLayout();
     cloudJettys.get(0).jetty.start();
-    cloudClient.getZkStateReader().forceUpdateCollection("multiunload2");
     try {
       cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
     } catch (SolrException e) {
@@ -1030,7 +1029,6 @@ private void testANewCollectionInOneInstanceWithManualShardAssignement() throws
     // we added a role of none on these creates - check for it
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.forceUpdateCollection(oneInstanceCollection2);
     Map slices = zkStateReader.getClusterState().getCollection(oneInstanceCollection2).getSlicesMap();
     assertNotNull(slices);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index 7d917b499332..2b99ff6e093a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -108,10 +108,9 @@ public void testReplicasInLowerTerms() throws Exception {
       putNonLeadersIntoLowerTerm(testCollectionName, SHARD1, zkController, leader, notLeaders, cloudClient);
       for (Replica replica : notLeaders) {
-        waitForState(testCollectionName, replica.getName(), State.DOWN, 60000);
+        waitForState(testCollectionName, replica.getName(), State.DOWN, 10000);
       }
-      waitForState(testCollectionName, leader.getName(), State.DOWN, 60000);
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
+      waitForState(testCollectionName, leader.getName(), State.DOWN, 10000);
       ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
       int numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
       assertEquals("Expected only 0 active replica but found " + numActiveReplicas +
@@ -139,7 +138,6 @@ public void testReplicasInLowerTerms() throws Exception {
       // By now we have an active leader. Wait for recoveries to begin
       waitForRecoveriesToFinish(testCollectionName, cloudClient.getZkStateReader(), true);
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
       clusterState = cloudClient.getZkStateReader().getClusterState();
       if (log.isInfoEnabled()) {
         log.info("After forcing leader: {}", clusterState.getCollection(testCollectionName).getSlice(SHARD1));
@@ -262,7 +260,7 @@ private void bringBackOldLeaderAndSendDoc(String collection, Replica leader, Lis
     getProxyForReplica(leader).reopen();
     leaderJetty.start();
     waitForRecoveriesToFinish(collection, cloudClient.getZkStateReader(), true);
-    cloudClient.getZkStateReader().forceUpdateCollection(collection);
+
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     if (log.isInfoEnabled()) {
       log.info("After bringing back leader: {}", clusterState.getCollection(collection).getSlice(SHARD1));
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
index c673bf7d4a8d..2028f825e017 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
@@ -112,7 +112,6 @@ private void multiShardTest() throws Exception {
     Thread.sleep(sleepMsBeforeHealPartition);
-    cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
@@ -165,7 +164,6 @@ private void oneShardTest() throws Exception {
     sendCommitWithRetry(replica);
     Thread.sleep(sleepMsBeforeHealPartition);
-    cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 7a269fa71479..64315a408156 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -647,7 +647,6 @@ protected void waitToSeeReplicasActive(String testCollectionName, String shardId
     final RTimer timer = new RTimer();
     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.forceUpdateCollection(testCollectionName);
     ClusterState cs = zkr.getClusterState();
     boolean allReplicasUp = false;
     long waitMs = 0L;
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
index 0dc4c169cf99..4df2393303b9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
@@ -69,7 +69,7 @@ public static void setupCluster() throws Exception {
   @Test
   public void test() throws KeeperException, InterruptedException, IOException, SolrServerException {
     ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
-    stateReader.forceUpdateCollection(TEST_COLLECTION_1);
+    ClusterState clusterState = stateReader.getClusterState();
     // The test assume that TEST_COLLECTION_1 and TEST_COLLECTION_2 will have identical layout
     // ( same replica's name on every shard )
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 695ce19f6297..eae66e81b442 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -67,7 +67,6 @@ private boolean waitForRuleToExpire(String collection, String shard, String spli
     boolean ruleRemoved = false;
     long expiryTime = finishTime + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
     while (System.nanoTime() < expiryTime) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       state = getCollectionState(collection);
       slice = state.getSlice(shard);
       Map routingRules = slice.getRoutingRules();
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 8241c6f52114..263a7a967869 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -701,7 +701,6 @@ public void testOverseerFailure() throws Exception {
       reader.waitForState(COLLECTION, 5000, TimeUnit.MILLISECONDS, (liveNodes, collectionState) -> collectionState != null && collectionState.getReplica(core_node) == null);
-      reader.forceUpdateCollection(COLLECTION);
       // as of SOLR-5209 core removal does not cascade to remove the slice and collection
       assertTrue(COLLECTION +" should remain after removal of the last core", reader.getClusterState().hasCollection(COLLECTION));
@@ -1059,7 +1058,6 @@ public void testDoubleAssignment() throws Exception {
       mockController.publishState(COLLECTION, "core1", "core_node1","shard1", Replica.State.RECOVERING, 1, true, overseers.get(0));
-      reader.forceUpdateCollection(COLLECTION);
       ClusterState state = reader.getClusterState();
       int numFound = 0;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index f0af14455704..6d0602a008cb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -481,7 +481,6 @@ private void doTestNoLeader(boolean removeReplica) throws Exception {
     unIgnoreException("No registered leader was found"); // Should have a leader from now on
     // Validate that the new nrt replica is the leader now
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
     docCollection = getCollectionState(collectionName);
     leader = docCollection.getSlice("shard1").getLeader();
     assertTrue(leader != null && leader.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
@@ -575,7 +574,6 @@ private void waitForDeletion(String collection) throws InterruptedException, Kee
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
@@ -584,9 +582,7 @@ private void waitForDeletion(String collection) throws InterruptedException, Kee
   }
   private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of writer replicas: " + docCollection, numNrtReplicas,
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
index 63ac70d865fd..e9c6218ea9c0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
@@ -269,9 +269,7 @@ private void addDocs(int numDocs) throws SolrServerException, IOException {
   }
   private DocCollection assertNumberOfReplicas(int numWriter, int numActive, int numPassive, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of writer replicas: " + docCollection, numWriter,
@@ -316,7 +314,6 @@ private void waitForDeletion(String collection) throws InterruptedException, Kee
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 37a134ee00eb..1e8dc97aee05 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -148,7 +148,6 @@ private void testQueryAgainstDownReplica() throws Exception {
     waitForRecoveriesToFinish("football", true);
-    cloudClient.getZkStateReader().forceUpdateCollection("football");
     Replica leader = null;
     Replica notLeader = null;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
index d6506c29a108..4dbd1d134172 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
@@ -583,7 +583,7 @@ protected long deleteDocAndGetVersion(String id, ModifiableSolrParams params, bo
    */
  public SolrClient getClientForLeader() throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     Replica leader = null;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
index 1a7e5f5f89d2..3d683e443693 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
@@ -821,7 +821,6 @@ private void waitForDeletion(String collection) throws InterruptedException, Kee
         if (t.hasTimedOut()) {
          fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
@@ -830,9 +829,7 @@ private void waitForDeletion(String collection) throws InterruptedException, Kee
   }
   private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of nrt replicas: " + docCollection, numNrtReplicas,
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
index a43e2f9d7af0..84a1e8d6423e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
@@ -132,7 +132,6 @@ public static void createMiniSolrCloudCluster() throws Exception {
       String nodeKey = jetty.getHost() + ":" + jetty.getLocalPort() + jetty.getBaseUrl().replace("/","_");
       urlMap.put(nodeKey, jettyURL.toString());
     }
-    zkStateReader.forceUpdateCollection(COLLECTION_NAME);
     ClusterState clusterState = zkStateReader.getClusterState();
     for (Slice slice : clusterState.getCollection(COLLECTION_NAME).getSlices()) {
       String shardName = slice.getName();
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
index 6cf4af4dc05f..53124e0e8c61 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
@@ -305,8 +305,6 @@ public List getCoreDescriptors() {
       propMap.put(ZkStateReader.STATE_PROP, "down");
       zkController.getOverseerJobQueue().offer(Utils.toJSON(propMap));
-      zkController.getZkStateReader().forciblyRefreshAllClusterStateSlow();
-
       long now = System.nanoTime();
       long timeout = now + TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS);
       zkController.publishAndWaitForDownStates(5);
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
index 8d544d964fc6..8c4f8badc0a1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -144,8 +144,6 @@ public void testAsyncRequests() throws Exception {
         .processAndWait(client, MAX_TIMEOUT_SECONDS);
     assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
-    client.getZkStateReader().forceUpdateCollection(collection);
-
     //Add a doc to shard2 to make sure shard2 was created properly
     SolrInputDocument doc = new SolrInputDocument();
     doc.addField("id", numDocs + 1);
@@ -208,8 +206,7 @@ public void testAsyncRequests() throws Exception {
         break;
       }
     }
-    client.getZkStateReader().forceUpdateCollection(collection);
-
+
     shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
     String replicaName = shard1.getReplicas().iterator().next().getName();
     state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index 736f44f3747d..d5053099c9dc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -268,7 +268,6 @@ private void testNoConfigset() throws Exception {
       // Now try deleting the configset and doing a clusterstatus.
       String parent = ZkConfigManager.CONFIGS_ZKNODE + "/" + configSet;
       deleteThemAll(client.getZkStateReader().getZkClient(), parent);
-      client.getZkStateReader().forciblyRefreshAllClusterStateSlow();
       final CollectionAdminRequest.ClusterStatus req = CollectionAdminRequest.getClusterStatus();
       NamedList rsp = client.request(req);
@@ -923,7 +922,6 @@ private void testClusterStateMigration() throws Exception {
       CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
-      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
       assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
@@ -1033,7 +1031,6 @@ private void testShardCreationNameValidation() throws Exception {
   private Map getProps(CloudSolrClient client, String collectionName, String replicaName, String... props) throws KeeperException, InterruptedException {
-    client.getZkStateReader().forceUpdateCollection(collectionName);
     ClusterState clusterState = client.getZkStateReader().getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
     if (docCollection == null || docCollection.getReplica(replicaName) == null) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
index 548499c078bc..f3bad87d2a3c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -137,7 +137,7 @@ public void testCollectionCreateSearchDelete() throws Exception {
     // remove a server not hosting any replicas
     ZkStateReader zkStateReader = client.getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
+
     ClusterState clusterState = zkStateReader.getClusterState();
     Map jettyMap = new HashMap<>();
     for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
@@ -244,7 +244,7 @@ public void testStopAllStartAll() throws Exception {
     assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
     // the test itself
-    zkStateReader.forceUpdateCollection(collectionName);
+
     final ClusterState clusterState = zkStateReader.getClusterState();
     final Set leaderIndices = new HashSet<>();
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index 66af1c6dcbd5..c32cd24c3b83 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -172,7 +172,6 @@ private void createAndDeleteCollection() throws Exception {
     }
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
-    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
     for (int i = 1; i < nShards + 1; i++) {
       cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
index 279b164c816f..1a75ac917c87 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
@@ -99,11 +99,11 @@ public void testStateFormatUpdate(boolean explicitRefresh, boolean isInteresting
       boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
       assertFalse(exists);
-      if (explicitRefresh) {
-        reader.forceUpdateCollection("c1");
-      } else {
+      // if (explicitRefresh) {
+      //reader.forceUpdateCollection("c1");
+      // } else {
         reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null);
-      }
+      // }
       DocCollection collection = reader.getClusterState().getCollection("c1");
       assertEquals(1, collection.getStateFormat());
@@ -122,12 +122,12 @@ public void testStateFormatUpdate(boolean explicitRefresh, boolean isInteresting
       boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
       assertTrue(exists);
-      if (explicitRefresh) {
-        reader.forceUpdateCollection("c1");
-      } else {
+      // if (explicitRefresh) {
+      // reader.forceUpdateCollection("c1");
+      // } else {
         reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null && c.getStateFormat() == 2);
-      }
+      // }
       DocCollection collection = reader.getClusterState().getCollection("c1");
       assertEquals(2, collection.getStateFormat());
@@ -163,7 +163,6 @@ public void testExternalCollectionWatchedNotWatched() throws Exception{
           new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
       writer.enqueueUpdate(reader.getClusterState(), Collections.singletonList(c1), null);
       writer.writePendingUpdates(reader.getClusterState());
-      reader.forceUpdateCollection("c1");
       assertTrue(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
       reader.registerCore("c1");
@@ -247,7 +246,6 @@ public void testWatchedCollectionCreation() throws Exception {
       assertNull(reader.getClusterState().getCollectionRef("c1"));
       zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
-      reader.forceUpdateCollection("c1");
       // Still no c1 collection, despite a collection path.
       assertNull(reader.getClusterState().getCollectionRef("c1"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
index 639e5d118b6a..8b30ee246f5c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
@@ -221,8 +221,7 @@ public void testExternalModificationToSharedClusterState() throws Exception {
       writer.enqueueUpdate(reader.getClusterState(), Collections.singletonList(c1), null);
       writer.writePendingUpdates(reader.getClusterState());
-      reader.forceUpdateCollection("c1");
-      reader.forceUpdateCollection("c2");
+
       ClusterState clusterState = reader.getClusterState(); // keep a reference to the current cluster state object
       assertTrue(clusterState.hasCollection("c1"));
       assertFalse(clusterState.hasCollection("c2"));
@@ -304,8 +303,6 @@ public void testExternalModificationToStateFormat2() throws Exception {
       byte[] data = zkClient.getData(ZkStateReader.getCollectionPath("c2"), null, null, true);
       zkClient.setData(ZkStateReader.getCollectionPath("c2"), data, true);
-      // get the most up-to-date state
-      reader.forceUpdateCollection("c2");
       state = reader.getClusterState();
       log.info("Cluster state: {}", state);
       assertTrue(state.hasCollection("c2"));
@@ -315,8 +312,6 @@ public void testExternalModificationToStateFormat2() throws Exception {
       writer.enqueueUpdate(state, Collections.singletonList(c2), null);
       assertTrue(writer.hasPendingUpdates());
-      // get the most up-to-date state
-      reader.forceUpdateCollection("c2");
       state = reader.getClusterState();
       // Will trigger flush
diff --git a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
index e06911d53bf9..6d19a986a207 100644
--- a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
+++ b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
@@ -195,7 +195,6 @@ private void resetDelays() {
   private void mapReplicasToClients() throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     Replica leader = null;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
@@ -1006,7 +1005,7 @@ private void delayedReorderingFetchesMissingUpdateFromLeaderTest() throws Except
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i=0; i<100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
       ClusterState state = cloudClient.getZkStateReader().getClusterState();
       int numActiveReplicas = 0;
@@ -1079,7 +1078,7 @@ private void delayedReorderingFetchesMissingUpdateFromLeaderTest() throws Except
     try (ZkShardTerms zkShardTerms = new ZkShardTerms(DEFAULT_COLLECTION, SHARD1, cloudClient.getZkStateReader().getZkClient())) {
       for (int i=0; i<100; i++) {
         Thread.sleep(10);
-        cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
         ClusterState state = cloudClient.getZkStateReader().getClusterState();
         int numActiveReplicas = 0;
@@ -1333,7 +1332,7 @@ private void reorderedDBQsUsingUpdatedValueFromADroppedUpdate() throws Exception
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i=0; i<100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
       ClusterState state = cloudClient.getZkStateReader().getClusterState();
       int numActiveReplicas = 0;
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 0ea782e98a84..811d87f14623 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -365,87 +365,6 @@ public ZkConfigManager getConfigManager() {
     return configManager;
   }
 
-  /**
-   * Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive.
-   * <p>
-   * It is cheaper to call {@link #forceUpdateCollection(String)} on a single collection if you must.
-   *
-   * @lucene.internal
-   */
-  public void forciblyRefreshAllClusterStateSlow() throws KeeperException, InterruptedException {
-    synchronized (getUpdateLock()) {
-      if (clusterState == null) {
-        // Never initialized, just run normal initialization.
-        createClusterStateWatchersAndUpdate();
-        return;
-      }
-      // No need to set watchers because we should already have watchers registered for everything.
-      refreshCollectionList(null);
-      refreshLiveNodes(null);
-      refreshLegacyClusterState(null);
-      // Need a copy so we don't delete from what we're iterating over.
-      Collection<String> safeCopy = new ArrayList<>(watchedCollectionStates.keySet());
-      Set<String> updatedCollections = new HashSet<>();
-      for (String coll : safeCopy) {
-        DocCollection newState = fetchCollectionState(coll, null);
-        if (updateWatchedCollection(coll, newState)) {
-          updatedCollections.add(coll);
-        }
-      }
-      constructState(updatedCollections);
-    }
-  }
-
-  /**
-   * Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when
-   * a better design is possible.
-   */
-  //TODO shouldn't we call ZooKeeper.sync() at the right places to prevent reading a stale value? We do so for aliases.
-  public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException {
-
-    synchronized (getUpdateLock()) {
-      if (clusterState == null) {
-        log.warn("ClusterState watchers have not been initialized");
-        return;
-      }
-
-      ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection);
-      if (ref == null || legacyCollectionStates.containsKey(collection)) {
-        // We either don't know anything about this collection (maybe it's new?) or it's legacy.
-        // First update the legacy cluster state.
-        log.debug("Checking legacy cluster state for collection {}", collection);
-        refreshLegacyClusterState(null);
-        if (!legacyCollectionStates.containsKey(collection)) {
-          // No dice, see if a new collection just got created.
-          LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
-          if (tryLazyCollection.get() != null) {
-            // What do you know, it exists!
-            log.debug("Adding lazily-loaded reference for collection {}", collection);
-            lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
-            constructState(Collections.singleton(collection));
-          }
-        }
-      } else if (ref.isLazilyLoaded()) {
-        log.debug("Refreshing lazily-loaded state for collection {}", collection);
-        if (ref.get() != null) {
-          return;
-        }
-        // Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there.
-        refreshLegacyClusterState(null);
-      } else if (watchedCollectionStates.containsKey(collection)) {
-        // Exists as a watched collection, force a refresh.
-        log.debug("Forcing refresh of watched collection state for {}", collection);
-        DocCollection newState = fetchCollectionState(collection, null);
-        if (updateWatchedCollection(collection, newState)) {
-          constructState(Collections.singleton(collection));
-        }
-      } else {
-        log.error("Collection {} is not lazy or watched!", collection);
-      }
-    }
-
-  }
-
   /**
    * Refresh the set of live nodes.
    */
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index 228c18aa3f5b..8e28cd0da17e 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -215,7 +215,6 @@ static void waitForNewLeader(CloudSolrClient cloudClient, String shardName, Repl
       throws Exception {
     log.info("Will wait for a node to become leader for {} secs", timeOut.timeLeft(SECONDS));
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
     for (; ; ) {
       ClusterState clusterState = zkStateReader.getClusterState();
@@ -266,7 +265,6 @@ public static void verifyReplicaStatus(ZkStateReader reader, String collection,
   protected static void assertAllActive(String collection, ZkStateReader zkStateReader) throws KeeperException, InterruptedException {
-    zkStateReader.forceUpdateCollection(collection);
     ClusterState clusterState = zkStateReader.getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
     if (docCollection == null || docCollection.getSlices() == null) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index b438df3366ee..516203014e0b 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -885,7 +885,7 @@ protected void updateMappingsFromZk(List jettys, List
   protected void updateMappingsFromZk(List jettys, List clients, boolean allowOverSharding) throws Exception {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
+
     cloudJettys.clear();
     shardToJetty.clear();
@@ -2141,7 +2140,6 @@ protected void logReplicaTypesReplicationInfo(String collectionName, ZkStateRead
     log.info("## Collecting extra Replica.Type information of the cluster");
     zkStateReader.updateLiveNodes();
     StringBuilder builder = new StringBuilder();
-    zkStateReader.forceUpdateCollection(collectionName);
     DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
     for(Slice s:collection.getSlices()) {
       Replica leader = s.getLeader();
@@ -2161,7 +2160,6 @@ protected void waitForReplicationFromReplicas(String collectionName, ZkStateRead
   protected void waitForReplicationFromReplicas(String collectionName, ZkStateReader zkStateReader, TimeOut timeout) throws KeeperException, InterruptedException, IOException {
     log.info("waitForReplicationFromReplicas: {}", collectionName);
-    zkStateReader.forceUpdateCollection(collectionName);
     DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
     Map containers = new HashMap<>();
     for (JettySolrRunner runner:jettys) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
index 7dfdc36d7ce5..9da53e9e9855 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
@@ -417,9 +417,6 @@ private boolean canKillIndexer(String sliceName) throws KeeperException, Interru
     int numIndexersFoundInShard = 0;
     for (CloudJettyRunner cloudJetty : shardToJetty.get(sliceName)) {
-      // get latest cloud state
-      zkStateReader.forceUpdateCollection(collection);
-
       DocCollection docCollection = zkStateReader.getClusterState().getCollection(collection);
       Slice slice = docCollection.getSlice(sliceName);
@@ -445,10 +442,7 @@ private boolean canKillIndexer(String sliceName) throws KeeperException, Interru
   private int checkIfKillIsLegal(String sliceName, int numActive) throws KeeperException, InterruptedException {
     for (CloudJettyRunner cloudJetty : shardToJetty.get(sliceName)) {
-
-      // get latest cloud state
-      zkStateReader.forceUpdateCollection(collection);
-
+
       DocCollection docCollection = zkStateReader.getClusterState().getCollection(collection);
       Slice slice = docCollection.getSlice(sliceName);