diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
index 1c77c1287377..45498617fecd 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
@@ -87,10 +87,9 @@ public void inform(ResourceLoader loader) throws IOException {
     String dicts[] = dictionaryFiles.split(",");
     InputStream affix = null;
-    List dictionaries = new ArrayList<>();
+    List dictionaries = new ArrayList<>(dicts.length);
     try {
-      dictionaries = new ArrayList<>();
       for (String file : dicts) {
         dictionaries.add(loader.openResource(file));
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
index 0397de7f82db..011a495040b6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
@@ -89,7 +89,7 @@ public CapitalizationFilterFactory(Map args) {
     k = getSet(args, OK_PREFIX);
     if (k != null) {
-      okPrefix = new ArrayList<>();
+      okPrefix = new ArrayList<>(k.size());
       for (String item : k) {
         okPrefix.add(item.toCharArray());
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
index 788db0a15a9b..aa5e83255cf6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
@@ -438,7 +438,7 @@ private void bufferOutputTokens(BytesRef bytes, int matchInputLength) {
     // TODO: we could encode this instead into the FST:
     // 1st pass: count how many new nodes we need
-    List> paths = new ArrayList<>();
+    List> paths = new ArrayList<>(count);
     for(int outputIDX=0;outputIDX
 splitFileNames(String fileNames) {
     if (fileNames == null) return Collections.emptyList();
-    List result = new ArrayList<>();
-    for (String file : fileNames.split("(?
result = new ArrayList<>(fileNamesArr.length);
+    for (String file : fileNamesArr) {
       result.add(file.replaceAll("\\\\(?=,)", ""));
     }
diff --git a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
index 1b8abbba64e3..ce2369042279 100644
--- a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
+++ b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
@@ -68,8 +68,9 @@ public boolean accept(File dir, String name) {
         return name.endsWith(".csv");
       }
     };
-    ArrayList csvFiles = new ArrayList<>();
-    for (File file : new File(dirname).listFiles(filter)) {
+    File[] foundFiles = new File(dirname).listFiles(filter);
+    ArrayList csvFiles = new ArrayList<>(foundFiles.length);
+    for (File file : foundFiles) {
       csvFiles.add(file);
     }
     Collections.sort(csvFiles);
diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java
index 330749835434..3783627957d0 100644
--- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java
+++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java
@@ -167,10 +167,10 @@ public int getToCount() {
   public List getShortPath() {
     int current;
     int nodeCount = getToCount();
-    List path = new ArrayList<>();
     PathNode zeroPath = new PathNode();
     zeroPath.weight = 0;
     zeroPath.preNode = 0;
+    List path = new ArrayList<>(nodeCount + 1);
     path.add(zeroPath);
     for (current = 1; current <= nodeCount; current++) {
       double weight;
diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java
index 983c67f1950c..d86797c9ab8a 100644
--- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java
+++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java
@@ -78,7 +78,6 @@ public Gener() {}
   @Override
   public Trie optimize(Trie orig) {
     List cmds = orig.cmds;
-    List rows = new ArrayList<>();
     List orows = orig.rows;
     int remap[] = new int[orows.size()];
@@ -90,7 +89,7 @@ public Trie optimize(Trie orig) {
     }
     Arrays.fill(remap, -1);
-    rows = removeGaps(orig.root, orows, new ArrayList(), remap);
+    List rows = removeGaps(orig.root, orows, new ArrayList(), remap);
     return new Trie(orig.forward, remap[orig.root], cmds, rows);
   }
diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java
index 16da8c8149a6..7162906e1cae 100644
--- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java
+++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java
@@ -88,7 +88,6 @@ public Lift(boolean changeSkip) {
   @Override
   public Trie optimize(Trie orig) {
     List cmds = orig.cmds;
-    List rows = new ArrayList<>();
     List orows = orig.rows;
     int remap[] = new int[orows.size()];
@@ -97,7 +96,7 @@ public Trie optimize(Trie orig) {
     }
     Arrays.fill(remap, -1);
-    rows = removeGaps(orig.root, orows, new ArrayList(), remap);
+    List rows = removeGaps(orig.root, orows, new ArrayList(), remap);
     return new Trie(orig.forward, remap[orig.root], cmds, rows);
   }
diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java
index e0d9376df6d6..bd59a32a0715 100644
--- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java
+++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java
@@ -186,7 +186,7 @@ public void add(CharSequence key, CharSequence cmd) {
    */
  @Override
  public Trie reduce(Reduce by) {
-    List h = new ArrayList<>();
+    List h = new ArrayList<>(tries.size());
    for (Trie trie : tries)
      h.add(trie.reduce(by));
diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java
index cfe3181ad234..fb04d1446557 100644
--- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java
+++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java
@@ -277,7 +277,7 @@ public CharSequence[] decompose(CharSequence cmd) {
    */
  @Override
  public Trie reduce(Reduce by) {
-    List h = new ArrayList<>();
+    List h = new ArrayList<>(tries.size());
    for (Trie trie : tries)
      h.add(trie.reduce(by));
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
index a35f503cb983..267e1a782524 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54DocValuesProducer.java
@@ -1531,7 +1531,7 @@ public long ramBytesUsed() {
     @Override
     public Collection getChildResources() {
-      List resources = new ArrayList<>();
+      List resources = new ArrayList<>(2);
       resources.add(Accountables.namedAccountable("term bytes", terms));
       resources.add(Accountables.namedAccountable("term addresses", termAddresses));
       return Collections.unmodifiableList(resources);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index b8d23bdb3d62..abc79881ab7e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -843,7 +843,7 @@ public long ramBytesUsed() {
   @Override
   public Collection getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(2);
     if (indexReader != null) {
       resources.add(Accountables.namedAccountable("term index", indexReader));
     }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java
index adf8191adc77..6fda8ee8099c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java
@@ -263,7 +263,7 @@ public long ramBytesUsed() {
   @Override
   public Collection getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(2);
     if (termOffsets != null) {
       resources.add(Accountables.namedAccountable("term lengths", termOffsets));
     }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java
index fe6c8f6456a5..c7fa9011ef2e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java
@@ -631,7 +631,7 @@ public long ramBytesUsed() {
   @Override
   public Collection getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(2);
     if (address != null) {
       resources.add(Accountables.namedAccountable("addresses", RamUsageEstimator.sizeOf(address)));
     }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
index b81e56e087fe..df3cef72a8e4 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
@@ -774,7 +774,7 @@ public long ramBytesUsed() {
   @Override
   public Collection getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(2);
     if (addresses != null) {
       resources.add(Accountables.namedAccountable("addresses", addresses));
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
index 3d06b51a4ddc..aeb741c7c664 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
@@ -476,7 +476,7 @@ public int nextDoc() throws IOException {
    * an Iterable that merges ordinals and values and filters deleted documents .
    */
  public void mergeSortedField(FieldInfo fieldInfo, final MergeState mergeState) throws IOException {
-    List toMerge = new ArrayList<>();
+    List toMerge = new ArrayList<>(mergeState.docValuesProducers.length);
    for (int i=0;i
 toMerge = new ArrayList<>();
+    List toMerge = new ArrayList<>(mergeState.docValuesProducers.length);
    for (int i=0;i
 fields = new ArrayList<>();
-    final List slices = new ArrayList<>();
+    final List fields = new ArrayList<>(mergeState.fieldsProducers.length);
+    final List slices = new ArrayList<>(mergeState.fieldsProducers.length);
    int docBase = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
index 0540f4f7ce89..b80735d05c35 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
@@ -110,7 +110,7 @@ public int nextDoc() {
    * Implementations can override this method for more sophisticated
    * merging (bulk-byte copying, etc).
    */
  public int merge(MergeState mergeState) throws IOException {
-    List subs = new ArrayList<>();
+    List subs = new ArrayList<>(mergeState.storedFieldsReaders.length);
    for(int i=0;i
 subs = new ArrayList<>();
+    List subs = new ArrayList<>(mergeState.termVectorsReaders.length);
    for(int i=0;i
 getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(2);
    long docBaseDeltaBytes = RamUsageEstimator.shallowSizeOf(docBasesDeltas);
    for (PackedInts.Reader r : docBasesDeltas) {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
index 5b42870fac76..037194e3c6b9 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
@@ -502,7 +502,7 @@ public int merge(MergeState mergeState) throws IOException {
      * If all readers are compressed and they have the same fieldinfos then we can merge the serialized document
      * directly.
      */
-    List subs = new ArrayList<>();
+    List subs = new ArrayList<>(mergeState.storedFieldsReaders.length);
    for(int i=0;i
 getChildResources() {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(readers.size());
    for(Map.Entry ent : readers.entrySet()) {
      resources.add(Accountables.namedAccountable(readState.fieldInfos.fieldInfo(ent.getKey()).name, ent.getValue()));
diff --git a/lucene/core/src/java/org/apache/lucene/geo/SimpleGeoJSONPolygonParser.java b/lucene/core/src/java/org/apache/lucene/geo/SimpleGeoJSONPolygonParser.java
index 278307f512e1..360449d9aac5 100644
--- a/lucene/core/src/java/org/apache/lucene/geo/SimpleGeoJSONPolygonParser.java
+++ b/lucene/core/src/java/org/apache/lucene/geo/SimpleGeoJSONPolygonParser.java
@@ -79,16 +79,16 @@ public Polygon[] parse() throws ParseException {
    if (polyType.equals("Polygon")) {
      return new Polygon[] {parsePolygon(coordinates)};
    } else {
-      List polygons = new ArrayList<>();
+      Polygon[] polygons = new Polygon[coordinates.size()];
      for(int i=0;i
) o));
+        polygons[i] = (parsePolygon((List) o));
      }
-      return polygons.toArray(new Polygon[polygons.size()]);
+      return polygons;
    }
  }
@@ -217,11 +217,11 @@ private boolean isValidGeometryPath(String path) {
  }
  private Polygon parsePolygon(List coordinates) throws ParseException {
-    List holes = new ArrayList<>();
    Object o = coordinates.get(0);
    if (o instanceof List == false) {
but got: " + o); } + List holes = new ArrayList<>(coordinates.size()); double[][] polyPoints = parsePoints((List) o); for(int i=1;i numericPacket = new ArrayList<>(); + List numericPacket = new ArrayList<>(in.numericDVUpdates.length); numericDVUpdates.add(numericPacket); for (NumericDocValuesUpdate nu : in.numericDVUpdates) { NumericDocValuesUpdate clone = new NumericDocValuesUpdate(nu.term, nu.field, (Long) nu.value); @@ -62,7 +62,7 @@ void update(FrozenBufferedUpdates in) { numericPacket.add(clone); } - List binaryPacket = new ArrayList<>(); + List binaryPacket = new ArrayList<>(in.binaryDVUpdates.length); binaryDVUpdates.add(binaryPacket); for (BinaryDocValuesUpdate bu : in.binaryDVUpdates) { BinaryDocValuesUpdate clone = new BinaryDocValuesUpdate(bu.term, bu.field, (BytesRef) bu.value); diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java index cbf2ae27193e..5d03319573ae 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java @@ -4320,7 +4320,7 @@ private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) thr // System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders()); // Let the merge wrap readers - List mergeReaders = new ArrayList<>(); + List mergeReaders = new ArrayList<>(merge.readers.size()); for (SegmentReader reader : merge.readers) { mergeReaders.add(merge.wrapForMerge(reader)); } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java index 3970e0a6524e..b47feee5658d 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java @@ -913,7 +913,7 @@ public long ramBytesUsed() { @Override public Collection getChildResources() { - List resources = new ArrayList<>(); + List resources = new ArrayList<>(3); resources.add(Accountables.namedAccountable("global ord deltas", globalOrdDeltas)); resources.add(Accountables.namedAccountable("first segments", firstSegments)); resources.add(Accountables.namedAccountable("segment map", segmentMap)); diff --git a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java index 4540c852fc6d..e20a969ade30 100644 --- a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java +++ b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java @@ -161,8 +161,8 @@ public int freq() throws IOException { long minCost = Math.min( requiredNoScoring.stream().mapToLong(ScorerSupplier::cost).min().orElse(Long.MAX_VALUE), requiredScoring.stream().mapToLong(ScorerSupplier::cost).min().orElse(Long.MAX_VALUE)); - List requiredScorers = new ArrayList<>(); - List scoringScorers = new ArrayList<>(); + List requiredScorers = new ArrayList<>(requiredNoScoring.size() + requiredScoring.size()); + List scoringScorers = new ArrayList<>(requiredScoring.size()); for (ScorerSupplier s : requiredNoScoring) { requiredScorers.add(s.get(randomAccess || s.cost() > minCost)); } @@ -206,7 +206,7 @@ protected boolean lessThan(ScorerSupplier a, ScorerSupplier b) { } return new MinShouldMatchSumScorer(weight, optionalScorers, minShouldMatch); } else { - final List optionalScorers = new ArrayList<>(); + final List optionalScorers = new 
+      final List optionalScorers = new ArrayList<>(optional.size());
      for (ScorerSupplier scorer : optional) {
        optionalScorers.add(scorer.get(randomAccess));
      }
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
index dc44d53cd8ac..aa0421c06705 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
@@ -49,7 +49,7 @@ final class BooleanWeight extends Weight {
    this.query = query;
    this.needsScores = needsScores;
    this.similarity = searcher.getSimilarity(needsScores);
-    weights = new ArrayList<>();
+    weights = new ArrayList<>(query.clauses().size());
    for (BooleanClause c : query) {
      Weight w = searcher.createWeight(c.getQuery(), needsScores && c.isScoring(), boost);
      weights.add(w);
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 9cddab884c6a..d964b7f9673c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -66,7 +66,7 @@ public int freq() {
  @Override
  public Collection getChildren() {
-    ArrayList children = new ArrayList<>();
+    ArrayList children = new ArrayList<>(scorers.length);
    for (Scorer scorer : scorers) {
      children.add(new ChildScorer(scorer, "MUST"));
    }
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index 44cbd522fc24..06be9904fb92 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -179,7 +179,7 @@ public Query rewrite(IndexReader reader) throws IOException {
    }
    boolean actuallyRewritten = false;
-    List rewrittenDisjuncts = new ArrayList<>();
+    List rewrittenDisjuncts = new ArrayList<>(disjuncts.length);
    for (Query sub : disjuncts) {
      Query rewrittenSub = sub.rewrite(reader);
      actuallyRewritten |= rewrittenSub != sub;
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index c53942a71c13..e0c8ff455c60 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -195,7 +195,7 @@ public final float score() throws IOException {
  @Override
  public final Collection getChildren() {
-    ArrayList children = new ArrayList<>();
+    ArrayList children = new ArrayList<>(subScorers.size());
    for (DisiWrapper scorer : subScorers) {
      children.add(new ChildScorer(scorer.scorer, "SHOULD"));
    }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index 002375d161b2..509df435b449 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -53,8 +53,8 @@ public PostingsAndPosition(PostingsEnum postings, int offset) {
    this.docScorer = docScorer;
    this.needsScores = needsScores;
-    List iterators = new ArrayList<>();
-    List postingsAndPositions = new ArrayList<>();
+    List iterators = new ArrayList<>(postings.length);
+    List postingsAndPositions = new ArrayList<>(postings.length);
    for(PhraseQuery.PostingsAndFreq posting : postings) {
      iterators.add(posting.postings);
      postingsAndPositions.add(new PostingsAndPosition(posting.postings, posting.position));
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 5cae1222cc05..e4b283c3453b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -604,7 +604,7 @@ public C call() throws Exception {
        }));
      }
-      final List collectedCollectors = new ArrayList<>();
+      final List collectedCollectors = new ArrayList<>(topDocsFutures.size());
      for (Future future : topDocsFutures) {
        try {
          collectedCollectors.add(future.get());
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
index c2c419c75e41..874dcea3d93c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
@@ -115,7 +115,7 @@ protected boolean lessThan(Long a, Long b) {
      addLead(new DisiWrapper(scorer));
    }
-    List children = new ArrayList<>();
+    List children = new ArrayList<>(scorers.size());
    for (Scorer scorer : scorers) {
      children.add(new ChildScorer(scorer, "SHOULD"));
    }
diff --git a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java
index 0146674eb301..f3b21e1c5b18 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java
@@ -246,7 +246,7 @@ public synchronized void prune(Pruner pruner) throws IOException {
    // (not thread-safe since the values can change while
    // ArrayList is init'ing itself); must instead iterate
    // ourselves:
-    final List trackers = new ArrayList<>();
+    final List trackers = new ArrayList<>(searchers.size());
    for(SearcherTracker tracker : searchers.values()) {
      trackers.add(tracker);
    }
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 8165204c94cd..4860e1a28395 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -407,11 +407,11 @@ private ArrayList> gatherRptGroups(LinkedHashMap
> tmp = new ArrayList<>();
    ArrayList bb = ppTermsBitSets(rpp, rptTerms);
    unionTermGroups(bb);
    HashMap tg = termGroups(rptTerms, bb);
    HashSet distinctGroupIDs = new HashSet<>(tg.values());
+    ArrayList> tmp = new ArrayList<>(distinctGroupIDs.size());
    for (int i=0; i
());
    }
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
index f051d382b01e..c7a5a780c0a7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
@@ -99,7 +99,9 @@ public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanat
    TopDocs hits = rescore(searcher, oneHit, 1);
    assert hits.totalHits == 1;
-    List subs = new ArrayList<>();
+    SortField[] sortFields = sort.getSort();
+
+    List subs = new ArrayList<>(1 + sortFields.length);
    // Add first pass:
    Explanation first = Explanation.match(firstPassExplanation.getValue(), "first pass score", firstPassExplanation);
@@ -108,7 +110,6 @@ public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanat
    FieldDoc fieldDoc = (FieldDoc) hits.scoreDocs[0];
    // Add sort values:
-    SortField[] sortFields = sort.getSort();
    for(int i=0;i
 subs = new ArrayList<>();
+      List subs = new ArrayList<>(3);
      if (boostExpl.getValue() != 1.0f)
        subs.add(boostExpl);
      subs.add(stats.idf);
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
index 153fd7b09653..96214740a547 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
@@ -85,7 +85,7 @@ public float score(int doc, float freq) throws IOException {
    @Override
    public Explanation explain(int doc, Explanation freq) throws IOException {
-      List subs = new ArrayList<>();
+      List subs = new ArrayList<>(subScorers.length);
      for (SimScorer subScorer : subScorers) {
        subs.add(subScorer.explain(doc, freq));
      }
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
index 12ab1a2d0285..c37263de650b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
@@ -485,7 +485,7 @@ public Explanation idfExplain(CollectionStatistics collectionStats, TermStatisti
   */
  public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats[]) {
    double idf = 0d; // sum into a double before casting into a float
-    List subs = new ArrayList<>();
+    List subs = new ArrayList<>(termStats.length);
    for (final TermStatistics stat : termStats ) {
      Explanation idfExplain = idfExplain(collectionStats, stat);
      subs.add(idfExplain);
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
index 8bb263338cae..c11df44971e5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
@@ -86,7 +86,7 @@ ArrayList prepareConjunction(final LeafReaderContext context, Postings po
    if (littleSpans == null) {
      return null;
    }
-    ArrayList bigAndLittle = new ArrayList<>();
+    ArrayList bigAndLittle = new ArrayList<>(2);
    bigAndLittle.add(bigSpans);
    bigAndLittle.add(littleSpans);
    return bigAndLittle;
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
index 7958f4758b0b..c21521ff8486 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
@@ -177,7 +177,7 @@ public String toString(String field) {
  @Override
  public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
-    List subWeights = new ArrayList<>();
+    List subWeights = new ArrayList<>(clauses.size());
    for (SpanQuery q : clauses) {
      subWeights.add(q.createWeight(searcher, false, boost));
    }
@@ -234,7 +234,7 @@ public void extractTerms(Set terms) {
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    boolean actuallyRewritten = false;
-    List rewrittenClauses = new ArrayList<>();
+    List rewrittenClauses = new ArrayList<>(clauses.size());
    for (int i = 0 ; i < clauses.size(); i++) {
      SpanQuery c = clauses.get(i);
      SpanQuery query = (SpanQuery) c.rewrite(reader);
diff --git a/lucene/core/src/java/org/apache/lucene/util/Accountables.java b/lucene/core/src/java/org/apache/lucene/util/Accountables.java
index cdf50fdc9e28..8853c85307bc 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Accountables.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Accountables.java
@@ -98,7 +98,7 @@ public static Accountable namedAccountable(String description, long bytes) {
   * will not be able to cast or manipulate the resources in any way.
   */
  public static Collection namedAccountables(String prefix, Map in) {
-    List resources = new ArrayList<>();
+    List resources = new ArrayList<>(in.size());
    for (Map.Entry kv : in.entrySet()) {
      resources.add(namedAccountable(prefix + " '" + kv.getKey() + "'", kv.getValue()));
    }
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/Operations.java b/lucene/core/src/java/org/apache/lucene/util/automaton/Operations.java
index 718a9089ce2a..1eefa3888823 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/Operations.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/Operations.java
@@ -214,7 +214,7 @@ static public Automaton repeat(Automaton a, int count) {
    if (count == 0) {
      return repeat(a);
    }
-    List as = new ArrayList<>();
+    List as = new ArrayList<>(count + 1);
    while (count-- > 0) {
      as.add(a);
    }
@@ -242,7 +242,7 @@ static public Automaton repeat(Automaton a, int min, int max) {
      b = new Automaton();
      b.copy(a);
    } else {
-      List as = new ArrayList<>();
+      List as = new ArrayList<>(min);
      for(int i=0;i
 facetsWithSearch() throws IOException {
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
    // Retrieve results
-    List results = new ArrayList<>();
+    List results = new ArrayList<>(2);
    // Count both "Publish Date" and "Author" dimensions
    Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);
@@ -134,7 +134,7 @@ private List facetsOnly() throws IOException {
    searcher.search(new MatchAllDocsQuery(), fc);
    // Retrieve results
-    List results = new ArrayList<>();
+    List results = new ArrayList<>(1);
    // Count both "Publish Date" and "Author" dimensions
    Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
index 06b9bf42acd6..d1ea1880b678 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
@@ -103,7 +103,7 @@ private List search() throws IOException {
    // Retrieve results
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
-    List results = new ArrayList<>();
+    List results = new ArrayList<>(2);
    results.add(facets.getTopChildren(10, "Author"));
    results.add(facets.getTopChildren(10, "Publish Year"));
    indexReader.close();
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
index 878613860bae..72456b0f2287 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
@@ -42,7 +42,7 @@ public SimpleFieldFragList( int fragCharSize ) {
  @Override
  public void add( int startOffset, int endOffset, List phraseInfoList ) {
    float totalBoost = 0;
-    List subInfos = new ArrayList<>();
+    List subInfos = new ArrayList<>(phraseInfoList.size());
    for( WeightedPhraseInfo phraseInfo : phraseInfoList ){
      subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum(), phraseInfo.getBoost() ) );
      totalBoost += phraseInfo.getBoost();
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java
index aa8e9dd13e86..986aa5bac9e7 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java
@@ -43,8 +43,7 @@ public WeightedFieldFragList( int fragCharSize ) {
   */
  @Override
  public void add( int startOffset, int endOffset, List phraseInfoList ) {
-    List tempSubInfos = new ArrayList<>();
-    List realSubInfos = new ArrayList<>();
+    List tempSubInfos = new ArrayList<>(phraseInfoList.size());
    HashSet distinctTerms = new HashSet<>();
    int length = 0;
@@ -64,7 +63,7 @@ public void add( int startOffset, int endOffset, List phrase
    // To avoid that fragments containing a high number of words possibly "outrank" more relevant fragments
    // we "bend" the length with a standard-normalization a little bit.
    float norm = length * ( 1 / (float)Math.sqrt( length ) );
-
+    List realSubInfos = new ArrayList<>(tempSubInfos.size());
    float totalBoost = 0;
    for ( SubInfo tempSubInfo : tempSubInfos ) {
      float subInfoBoost = tempSubInfo.getBoost() * norm;
diff --git a/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java
index 368c2854a225..284557797e05 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java
@@ -74,14 +74,14 @@ public static void main(String[] args) throws Exception {
    if (args[1].equals("-l")) {
      is.listSegments();
    } else if (args[1].equals("-d")) {
-      List segs = new ArrayList<>();
+      List segs = new ArrayList<>(args.length - 2);
      for (int x = 2; x < args.length; x++) {
        segs.add(args[x]);
      }
      is.remove(segs.toArray(new String[0]));
    } else {
      Path targetDir = Paths.get(args[1]);
-      List segs = new ArrayList<>();
+      List segs = new ArrayList<>(args.length - 2);
      for (int x = 2; x < args.length; x++) {
        segs.add(args[x]);
      }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
index db67b9429dc9..94c0af14d1ee 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
@@ -132,7 +132,7 @@ public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation
      valSrcScore *= valSrcExpl.getValue();
    }
-    List subs = new ArrayList<>();
+    List subs = new ArrayList<>(1 + valSrcExpls.length);
    subs.add(subQueryExpl);
    for (Explanation valSrcExpl : valSrcExpls) {
      subs.add(valSrcExpl);
    }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
index 3ee9c6ced0c4..5ca906d891f8 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
@@ -211,7 +211,7 @@ protected Query getFieldQuery(String field, String queryText, boolean quoted) th
  protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
    if (field == null) {
-      List clauses = new ArrayList<>();
+      List clauses = new ArrayList<>(fields.length);
      for (int i = 0; i < fields.length; i++) {
        clauses.add(getFuzzyQuery(fields[i], termStr, minSimilarity));
      }
@@ -224,7 +224,7 @@ protected Query getFuzzyQuery(String field, String termStr, float minSimilarity)
  protected Query getPrefixQuery(String field, String termStr) throws ParseException {
    if (field == null) {
-      List clauses = new ArrayList<>();
+      List clauses = new ArrayList<>(fields.length);
      for (int i = 0; i < fields.length; i++) {
        clauses.add(getPrefixQuery(fields[i], termStr));
      }
@@ -236,7 +236,7 @@ protected Query getPrefixQuery(String field, String termStr) throws ParseExcepti
  @Override
  protected Query getWildcardQuery(String field, String termStr) throws ParseException {
    if (field == null) {
-      List clauses = new ArrayList<>();
+      List clauses = new ArrayList<>(fields.length);
      for (int i = 0; i < fields.length; i++) {
        clauses.add(getWildcardQuery(fields[i], termStr));
      }
@@ -249,7 +249,7 @@ protected Query getWildcardQuery(String field, String termStr) throws ParseExcep
  @Override
  protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException {
    if (field == null) {
-      List clauses = new ArrayList<>();
+      List clauses = new ArrayList<>(fields.length);
      for (int i = 0; i < fields.length; i++) {
        clauses.add(getRangeQuery(fields[i], part1, part2, startInclusive, endInclusive));
      }
@@ -264,7 +264,7 @@ protected Query getRangeQuery(String field, String part1, String part2, boolean
  protected Query getRegexpQuery(String field, String termStr) throws ParseException {
    if (field == null) {
-      List clauses = new ArrayList<>();
+      List clauses = new ArrayList<>(fields.length);
      for (int i = 0; i < fields.length; i++) {
        clauses.add(getRegexpQuery(fields[i], termStr));
      }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
index 586044c94d99..978717c90cd5 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
@@ -208,7 +208,7 @@ public QueryNode cloneTree() throws CloneNotSupportedException {
    // copy children
    if (this.values != null) {
-      List localValues = new ArrayList<>();
+      List localValues = new ArrayList<>(this.values.size());
      for (QueryText value : this.values) {
        localValues.add(value.clone());
      }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
index fca8386c670e..0c7dd55e2f1e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
@@ -127,7 +127,7 @@ public QueryNode cloneTree() throws CloneNotSupportedException {
    // copy children
    if (this.clauses != null) {
-      List localClauses = new ArrayList<>();
+      List localClauses = new ArrayList<>(this.clauses.size());
      for (QueryNode clause : this.clauses) {
        localClauses.add(clause.cloneTree());
      }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
index 4280d1c21df0..294d25d8fcf2 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
@@ -65,7 +65,7 @@ else if (q1 instanceof AndQueryNode)
    QueryNode result = null;
    switch (op) {
    case NONE:
-      List children = new ArrayList<>();
+      List children = new ArrayList<>(2);
      children.add(q1.cloneTree());
      children.add(q2.cloneTree());
      result = new AndQueryNode(children);
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
index 4cb49c4f12d9..86c252da225e 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
@@ -97,8 +97,9 @@ protected IndexSearcher refreshIfNeeded(IndexSearcher old) throws IOException {
    if (old == null) {
      subs = null;
    } else {
-      subs = new ArrayList<>();
-      for(LeafReaderContext ctx : old.getIndexReader().leaves()) {
+      List leaves = old.getIndexReader().leaves();
+      subs = new ArrayList<>(leaves.size());
+      for(LeafReaderContext ctx : leaves) {
        subs.add(ctx.reader());
      }
    }
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/NearestNeighbor.java b/lucene/sandbox/src/java/org/apache/lucene/document/NearestNeighbor.java
index 587c63fb7a3f..c48a7fc1e54b 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/NearestNeighbor.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/NearestNeighbor.java
@@ -219,7 +219,7 @@ public int compare(NearestHit a, NearestHit b) {
    PriorityQueue cellQueue = new PriorityQueue<>();
    NearestVisitor visitor = new NearestVisitor(hitQueue, n, pointLat, pointLon);
-    List states = new ArrayList<>();
+    List states = new ArrayList<>(readers.size());
    // Add root cell for each reader into the queue:
    int bytesPerDim = -1;
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
index 0ca81c75667c..c0101fc1af76 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
@@ -687,7 +687,7 @@ protected List createResults(IndexSearcher searcher, TopFieldDocs
      throws IOException {
    List leaves = searcher.getIndexReader().leaves();
-    List results = new ArrayList<>();
+    List results = new ArrayList<>(hits.scoreDocs.length);
    for (int i=0;i
 getChildResources() {
-    List accountableList = new ArrayList<>();
+    List accountableList = new ArrayList<>(readers.size());
    for (Map.Entry readerEntry : readers.entrySet()) {
      accountableList.add(Accountables.namedAccountable(readerEntry.getKey(), readerEntry.getValue()));
    }