From 84a3d7e7a22a28c6c280d12b16d7f469bc403755 Mon Sep 17 00:00:00 2001 From: wuchengwei Date: Thu, 13 Jun 2024 21:20:02 +0800 Subject: [PATCH] update v3.0.0 --- .gitignore | 21 + FlagOpen.png | Bin LICENSE | 0 README.md | 140 +- README_zh.md | 155 +- config/cleaner_config.yaml | 0 contact_me.png | Bin dedup.png | Bin dist/flagdata-1.0.0-py3-none-any.whl | Bin dist/flagdata-1.0.0.tar.gz | Bin flagdata/__init__.py | 0 flagdata/all2txt/README.md | 0 flagdata/all2txt/README_zh.md | 0 flagdata/all2txt/__init__.py | 0 flagdata/all2txt/epub2txt.py | 0 flagdata/all2txt/pdf2txt.py | 0 flagdata/analysis/README.md | 0 flagdata/analysis/README_zh.md | 0 flagdata/analysis/__init__.py | 0 .../analysis/average_rotation_analysis.py | 0 .../analysis/data/average_rounds_data.jsonl | 0 .../data/language_distribution_data.jsonl | 0 flagdata/analysis/draw_pie_chart.py | 0 .../analysis/field_distribution_analysis.py | 0 .../language_distribution_analysis.py | 0 flagdata/analysis/nested_pie_chart.py | 0 .../png/field_distribution_analysis.gif | Bin .../png/language_distribution_analysis.png | Bin flagdata/analysis/text_length_analysis.py | 0 flagdata/cleaner/__init__.py | 0 flagdata/cleaner/arxiv_cleaner.py | 101 + flagdata/cleaner/base_cleaner.py | 69 + flagdata/cleaner/book_cleaner.py | 437 + flagdata/cleaner/cleaner_builder.py | 45 + flagdata/cleaner/configs/arxiv_clean.yaml | 14 + flagdata/cleaner/configs/book_clean.yaml | 20 + flagdata/cleaner/configs/book_config.json | 158 + flagdata/cleaner/configs/html_clean.yaml | 18 + flagdata/cleaner/configs/qa_clean.yaml | 0 flagdata/cleaner/configs/text_clean.yaml | 20 + flagdata/cleaner/docs/Book_Cleaner.md | 146 + flagdata/cleaner/docs/Book_Cleaner_ZH.md | 141 + flagdata/cleaner/docs/Qa_Cleaner.md | 43 + flagdata/cleaner/docs/Qa_Cleaner_ZH.md | 40 + flagdata/cleaner/docs/Text_Cleaner.md | 40 + flagdata/cleaner/html_cleaner.py | 39 + flagdata/cleaner/input/arxiv_demo_input.jsonl | 100 + .../book_demo_data/11836262_Wolf s Mate.epub | Bin 0 -> 1274963 bytes ...oches de terciopelo (Spanish Edition).epub | Bin 0 -> 367969 bytes ...\344\270\211\345\215\267\357\274\211.epub" | Bin 0 -> 1729002 bytes ...350\200\205 \346\234\210\345\205\263.epub" | Bin 0 -> 1022643 bytes ...45\245\227\350\243\2053\345\206\214].epub" | Bin 0 -> 1602982 bytes ...sajes desde el lago (Spanish Edition).epub | Bin 0 -> 538842 bytes .../11836298_La vida es corta pero ancha.epub | Bin 0 -> 1704841 bytes ...5_El Guardaespaldas (Spanish Edition).epub | Bin 0 -> 222973 bytes ...Catcher (Vera Stanhope series Book 7).mobi | Bin 0 -> 553176 bytes ...n Instalove Possessive Age Gap Romanc.epub | Bin 0 -> 286354 bytes ...the Innocents (The Keeper Witches 2) .epub | Bin 0 -> 351746 bytes flagdata/cleaner/input/html_demo_input.txt | 1 + flagdata/cleaner/input/qa_demo_input.jsonl | 1000 + flagdata/cleaner/input/ref.jsonl | 5000 ++++ flagdata/cleaner/input/text_demo_input.jsonl | 7 + flagdata/cleaner/qa_cleaner.py | 113 + flagdata/cleaner/resource/emojis.json | 24061 ---------------- flagdata/cleaner/text_cleaner.py | 276 +- flagdata/cleaner/tutorial_01_cleaner.md | 89 - flagdata/cleaner/utils/common_utils.py | 86 + flagdata/cleaner/utils/filter.py | 292 - flagdata/cleaner/utils/http_utils.py | 19 + flagdata/cleaner/utils/initializer.py | 42 - flagdata/cleaner/utils/loggings.py | 28 - flagdata/cleaner/utils/string_utils.py | 42 + flagdata/cleaner/utils/time_formatter.py | 42 +- flagdata/data_gen/README.md | 0 flagdata/data_gen/README_zh.md | 0 flagdata/data_gen/__init__.py | 0 flagdata/data_gen/example.py | 0 
flagdata/data_gen/prompt_template.py | 0 flagdata/data_gen/strategy.py | 0 flagdata/data_gen/utils.py | 0 flagdata/data_operator/Operator.md | 113 + flagdata/data_operator/Operator_ZH.md | 114 + flagdata/data_operator/__init__.py | 1 + flagdata/data_operator/base_operator.py | 42 + flagdata/data_operator/formatter/__init__.py | 11 + .../data_operator/formatter/base_formatter.py | 56 + .../data_operator/formatter/csv_formatter.py | 21 + .../data_operator/formatter/json_formatter.py | 38 + .../formatter/parquet_formatter.py | 19 + .../data_operator/formatter/tsv_formatter.py | 19 + flagdata/data_operator/make_data.py | 28 + flagdata/data_operator/new_data.csv | 9 + flagdata/data_operator/new_data.json | 8 + flagdata/data_operator/new_data.parquet | Bin 0 -> 10341 bytes flagdata/data_operator/new_data.tsv | 9 + flagdata/data_operator/pruner/__init__.py | 14 + .../data_operator/pruner/catalogue_pruner.py | 57 + .../pruner/chinese_conversion_pruner.py | 41 + .../pruner/consecutive_newlines_pruner.py | 37 + .../pruner/control_char_pruner.py | 41 + .../data_operator/pruner/copyright_pruner.py | 53 + flagdata/data_operator/pruner/email_pruner.py | 37 + .../pruner/end_at_last_punctuation_pruner.py | 51 + .../pruner/figuret_able_caption_pruner.py | 42 + flagdata/data_operator/pruner/ip_pruner.py | 39 + .../pruner/latex_macro_expander_pruner.py | 48 + flagdata/data_operator/pruner/link_pruner.py | 37 + .../pruner/non_chinese_char_pruner.py | 41 + .../punctuation_normalization_pruner.py | 53 + .../pruner/repeat_sentence_pruner.py | 53 + .../data_operator/pruner/replace_pruner.py | 55 + .../pruner/specific_pattern_pruner.py | 44 + flagdata/data_operator/pruner/table_pruner.py | 38 + flagdata/data_operator/pruner/test.py | 121 + .../data_operator/pruner/unicode_pruner.py | 61 + .../data_operator/samplefilter/__init__.py | 20 + .../actionalbe_verb_num_filter.py | 75 + .../samplefilter/alphanumeric_ratio_filter.py | 42 + .../samplefilter/avg_line_length_filter.py | 41 + .../samplefilter/field_value_filter.py | 39 + .../flagged_words_ratio_filter.py | 69 + .../language_confidence_filter.py | 55 + .../samplefilter/max_line_length_filter.py | 39 + .../numeric_field_value_filter.py | 50 + .../special_character_ratio_filter.py | 75 + .../samplefilter/stropword_ratio_filter.py | 68 + .../samplefilter/suffix_filter.py | 30 + .../samplefilter/text_length_filter.py | 32 + .../samplefilter/token_num_filter.py | 42 + .../samplefilter/word_num_filter.py | 51 + .../word_repetition_ratio_filter.py | 59 + flagdata/data_operator/test.py | 267 + flagdata/data_operator/test_data.csv | 9 + flagdata/data_operator/test_data.json | 8 + flagdata/data_operator/test_data.parquet | Bin 0 -> 9491 bytes flagdata/data_operator/test_data.tsv | 9 + flagdata/deduplication/README.md | 0 flagdata/deduplication/README_zh.md | 0 flagdata/deduplication/__init__.py | 0 flagdata/deduplication/minhash.py | 0 flagdata/deduplication/stringMatching.py | 0 .../deduplication/udf_spark_stringMatching.py | 0 flagdata/language_identification/README.md | 0 flagdata/language_identification/README_zh.md | 0 flagdata/language_identification/__init__.py | 0 flagdata/language_identification/jsonql.py | 0 .../language_identification/split_by_lang.py | 0 .../quality_assessment/Bert/bert_config.yaml | 0 flagdata/quality_assessment/Bert/evaluate.py | 0 .../Bert/input_data/example_data.jsonl | 0 .../Bert/models/config.json | 0 .../models/tokenizer/special_tokens_map.json | 0 .../models/tokenizer/tokenizer_config.json | 0 .../Bert/models/tokenizer/vocab.txt | 0 
.../network/document_bert_architectures.py | 0 .../model_architechure_bert_multi_scale.py | 0 .../quality_assessment/Bert/utils/data.py | 0 .../quality_assessment/Bert/utils/encode.py | 0 .../FastText/data/cleared1.jsonl | 0 .../FastText/data/cn_stopwords.txt | 0 .../quality_assessment/FastText/evaluate.py | 0 flagdata/quality_assessment/README.md | 0 flagdata/quality_assessment/README_zh.md | 0 .../quality_assessment/quality_assessment.png | Bin flagdata_logo.png | Bin pic/data_operator.png | Bin 0 -> 68232 bytes pic/some_operator.png | Bin 0 -> 26784 bytes pic/users.png | Bin 0 -> 368776 bytes pipeline.png | Bin pipeline_zh.png | Bin pyproject.toml | 0 quickstart/cleaner/run_cleaner.py | 0 quickstart/cleaner/run_custom_cleaner.py | 0 requirements.txt | 53 +- tests/test_cleaner.py | 0 175 files changed, 10597 insertions(+), 24832 deletions(-) create mode 100644 .gitignore mode change 100644 => 100755 FlagOpen.png mode change 100644 => 100755 LICENSE mode change 100644 => 100755 README.md mode change 100644 => 100755 README_zh.md mode change 100644 => 100755 config/cleaner_config.yaml mode change 100644 => 100755 contact_me.png mode change 100644 => 100755 dedup.png mode change 100644 => 100755 dist/flagdata-1.0.0-py3-none-any.whl mode change 100644 => 100755 dist/flagdata-1.0.0.tar.gz mode change 100644 => 100755 flagdata/__init__.py mode change 100644 => 100755 flagdata/all2txt/README.md mode change 100644 => 100755 flagdata/all2txt/README_zh.md mode change 100644 => 100755 flagdata/all2txt/__init__.py mode change 100644 => 100755 flagdata/all2txt/epub2txt.py mode change 100644 => 100755 flagdata/all2txt/pdf2txt.py mode change 100644 => 100755 flagdata/analysis/README.md mode change 100644 => 100755 flagdata/analysis/README_zh.md mode change 100644 => 100755 flagdata/analysis/__init__.py mode change 100644 => 100755 flagdata/analysis/average_rotation_analysis.py mode change 100644 => 100755 flagdata/analysis/data/average_rounds_data.jsonl mode change 100644 => 100755 flagdata/analysis/data/language_distribution_data.jsonl mode change 100644 => 100755 flagdata/analysis/draw_pie_chart.py mode change 100644 => 100755 flagdata/analysis/field_distribution_analysis.py mode change 100644 => 100755 flagdata/analysis/language_distribution_analysis.py mode change 100644 => 100755 flagdata/analysis/nested_pie_chart.py mode change 100644 => 100755 flagdata/analysis/png/field_distribution_analysis.gif mode change 100644 => 100755 flagdata/analysis/png/language_distribution_analysis.png mode change 100644 => 100755 flagdata/analysis/text_length_analysis.py create mode 100644 flagdata/cleaner/__init__.py create mode 100644 flagdata/cleaner/arxiv_cleaner.py create mode 100644 flagdata/cleaner/base_cleaner.py create mode 100644 flagdata/cleaner/book_cleaner.py create mode 100644 flagdata/cleaner/cleaner_builder.py create mode 100644 flagdata/cleaner/configs/arxiv_clean.yaml create mode 100644 flagdata/cleaner/configs/book_clean.yaml create mode 100644 flagdata/cleaner/configs/book_config.json create mode 100644 flagdata/cleaner/configs/html_clean.yaml create mode 100644 flagdata/cleaner/configs/qa_clean.yaml create mode 100644 flagdata/cleaner/configs/text_clean.yaml create mode 100644 flagdata/cleaner/docs/Book_Cleaner.md create mode 100644 flagdata/cleaner/docs/Book_Cleaner_ZH.md create mode 100644 flagdata/cleaner/docs/Qa_Cleaner.md create mode 100644 flagdata/cleaner/docs/Qa_Cleaner_ZH.md create mode 100644 flagdata/cleaner/docs/Text_Cleaner.md create mode 100644 flagdata/cleaner/html_cleaner.py 
create mode 100644 flagdata/cleaner/input/arxiv_demo_input.jsonl create mode 100755 flagdata/cleaner/input/book_demo_data/11836262_Wolf s Mate.epub create mode 100755 flagdata/cleaner/input/book_demo_data/11836269_Noches de terciopelo (Spanish Edition).epub create mode 100755 "flagdata/cleaner/input/book_demo_data/11836275_\343\200\212\346\234\257\345\243\253\347\232\204\346\214\207\347\216\257\343\200\213\345\220\210\351\233\206\357\274\210\347\254\254\344\270\200\343\200\201\344\272\214\343\200\201\344\270\211\345\215\267\357\274\211.epub" create mode 100755 "flagdata/cleaner/input/book_demo_data/11836283_\343\200\212\346\234\233\345\217\244\347\245\236\350\257\235\344\271\213\347\247\246\345\242\237\343\200\213\357\274\210\345\205\250\346\234\254\346\240\241\345\257\271\357\274\211\344\275\234\350\200\205 \346\234\210\345\205\263.epub" create mode 100755 "flagdata/cleaner/input/book_demo_data/11836286_\343\200\212\344\272\224\344\273\243\345\210\200\351\224\213\343\200\213[\351\225\277\347\257\207\345\216\206\345\217\262\345\260\217\350\257\264\302\267\345\256\236\344\275\223\344\271\246\347\211\210\345\245\227\350\243\2053\345\206\214].epub" create mode 100755 flagdata/cleaner/input/book_demo_data/11836294_Mensajes desde el lago (Spanish Edition).epub create mode 100755 flagdata/cleaner/input/book_demo_data/11836298_La vida es corta pero ancha.epub create mode 100755 flagdata/cleaner/input/book_demo_data/11836315_El Guardaespaldas (Spanish Edition).epub create mode 100755 flagdata/cleaner/input/book_demo_data/11836358_The Moth Catcher (Vera Stanhope series Book 7).mobi create mode 100755 flagdata/cleaner/input/book_demo_data/11836359_Intern For My Best Friend s Dad An Instalove Possessive Age Gap Romanc.epub create mode 100755 flagdata/cleaner/input/book_demo_data/11836366_Keeper of the Innocents (The Keeper Witches 2) .epub create mode 100644 flagdata/cleaner/input/html_demo_input.txt create mode 100644 flagdata/cleaner/input/qa_demo_input.jsonl create mode 100644 flagdata/cleaner/input/ref.jsonl create mode 100644 flagdata/cleaner/input/text_demo_input.jsonl create mode 100644 flagdata/cleaner/qa_cleaner.py delete mode 100644 flagdata/cleaner/resource/emojis.json delete mode 100644 flagdata/cleaner/tutorial_01_cleaner.md create mode 100644 flagdata/cleaner/utils/common_utils.py delete mode 100644 flagdata/cleaner/utils/filter.py create mode 100644 flagdata/cleaner/utils/http_utils.py delete mode 100644 flagdata/cleaner/utils/initializer.py delete mode 100644 flagdata/cleaner/utils/loggings.py create mode 100644 flagdata/cleaner/utils/string_utils.py mode change 100644 => 100755 flagdata/data_gen/README.md mode change 100644 => 100755 flagdata/data_gen/README_zh.md mode change 100644 => 100755 flagdata/data_gen/__init__.py mode change 100644 => 100755 flagdata/data_gen/example.py mode change 100644 => 100755 flagdata/data_gen/prompt_template.py mode change 100644 => 100755 flagdata/data_gen/strategy.py mode change 100644 => 100755 flagdata/data_gen/utils.py create mode 100644 flagdata/data_operator/Operator.md create mode 100644 flagdata/data_operator/Operator_ZH.md create mode 100644 flagdata/data_operator/__init__.py create mode 100644 flagdata/data_operator/base_operator.py create mode 100644 flagdata/data_operator/formatter/__init__.py create mode 100644 flagdata/data_operator/formatter/base_formatter.py create mode 100644 flagdata/data_operator/formatter/csv_formatter.py create mode 100644 flagdata/data_operator/formatter/json_formatter.py create mode 100644 
flagdata/data_operator/formatter/parquet_formatter.py create mode 100644 flagdata/data_operator/formatter/tsv_formatter.py create mode 100644 flagdata/data_operator/make_data.py create mode 100644 flagdata/data_operator/new_data.csv create mode 100644 flagdata/data_operator/new_data.json create mode 100644 flagdata/data_operator/new_data.parquet create mode 100644 flagdata/data_operator/new_data.tsv create mode 100644 flagdata/data_operator/pruner/__init__.py create mode 100644 flagdata/data_operator/pruner/catalogue_pruner.py create mode 100644 flagdata/data_operator/pruner/chinese_conversion_pruner.py create mode 100644 flagdata/data_operator/pruner/consecutive_newlines_pruner.py create mode 100644 flagdata/data_operator/pruner/control_char_pruner.py create mode 100644 flagdata/data_operator/pruner/copyright_pruner.py create mode 100644 flagdata/data_operator/pruner/email_pruner.py create mode 100644 flagdata/data_operator/pruner/end_at_last_punctuation_pruner.py create mode 100644 flagdata/data_operator/pruner/figuret_able_caption_pruner.py create mode 100644 flagdata/data_operator/pruner/ip_pruner.py create mode 100644 flagdata/data_operator/pruner/latex_macro_expander_pruner.py create mode 100644 flagdata/data_operator/pruner/link_pruner.py create mode 100644 flagdata/data_operator/pruner/non_chinese_char_pruner.py create mode 100644 flagdata/data_operator/pruner/punctuation_normalization_pruner.py create mode 100644 flagdata/data_operator/pruner/repeat_sentence_pruner.py create mode 100644 flagdata/data_operator/pruner/replace_pruner.py create mode 100644 flagdata/data_operator/pruner/specific_pattern_pruner.py create mode 100644 flagdata/data_operator/pruner/table_pruner.py create mode 100644 flagdata/data_operator/pruner/test.py create mode 100644 flagdata/data_operator/pruner/unicode_pruner.py create mode 100644 flagdata/data_operator/samplefilter/__init__.py create mode 100644 flagdata/data_operator/samplefilter/actionalbe_verb_num_filter.py create mode 100644 flagdata/data_operator/samplefilter/alphanumeric_ratio_filter.py create mode 100644 flagdata/data_operator/samplefilter/avg_line_length_filter.py create mode 100644 flagdata/data_operator/samplefilter/field_value_filter.py create mode 100644 flagdata/data_operator/samplefilter/flagged_words_ratio_filter.py create mode 100644 flagdata/data_operator/samplefilter/language_confidence_filter.py create mode 100644 flagdata/data_operator/samplefilter/max_line_length_filter.py create mode 100644 flagdata/data_operator/samplefilter/numeric_field_value_filter.py create mode 100644 flagdata/data_operator/samplefilter/special_character_ratio_filter.py create mode 100644 flagdata/data_operator/samplefilter/stropword_ratio_filter.py create mode 100644 flagdata/data_operator/samplefilter/suffix_filter.py create mode 100644 flagdata/data_operator/samplefilter/text_length_filter.py create mode 100644 flagdata/data_operator/samplefilter/token_num_filter.py create mode 100644 flagdata/data_operator/samplefilter/word_num_filter.py create mode 100644 flagdata/data_operator/samplefilter/word_repetition_ratio_filter.py create mode 100644 flagdata/data_operator/test.py create mode 100644 flagdata/data_operator/test_data.csv create mode 100644 flagdata/data_operator/test_data.json create mode 100644 flagdata/data_operator/test_data.parquet create mode 100644 flagdata/data_operator/test_data.tsv mode change 100644 => 100755 flagdata/deduplication/README.md mode change 100644 => 100755 flagdata/deduplication/README_zh.md mode change 100644 => 100755 
flagdata/deduplication/__init__.py mode change 100644 => 100755 flagdata/deduplication/minhash.py mode change 100644 => 100755 flagdata/deduplication/stringMatching.py mode change 100644 => 100755 flagdata/deduplication/udf_spark_stringMatching.py mode change 100644 => 100755 flagdata/language_identification/README.md mode change 100644 => 100755 flagdata/language_identification/README_zh.md mode change 100644 => 100755 flagdata/language_identification/__init__.py mode change 100644 => 100755 flagdata/language_identification/jsonql.py mode change 100644 => 100755 flagdata/language_identification/split_by_lang.py mode change 100644 => 100755 flagdata/quality_assessment/Bert/bert_config.yaml mode change 100644 => 100755 flagdata/quality_assessment/Bert/evaluate.py mode change 100644 => 100755 flagdata/quality_assessment/Bert/input_data/example_data.jsonl mode change 100644 => 100755 flagdata/quality_assessment/Bert/models/config.json mode change 100644 => 100755 flagdata/quality_assessment/Bert/models/tokenizer/special_tokens_map.json mode change 100644 => 100755 flagdata/quality_assessment/Bert/models/tokenizer/tokenizer_config.json mode change 100644 => 100755 flagdata/quality_assessment/Bert/models/tokenizer/vocab.txt mode change 100644 => 100755 flagdata/quality_assessment/Bert/network/document_bert_architectures.py mode change 100644 => 100755 flagdata/quality_assessment/Bert/network/model_architechure_bert_multi_scale.py mode change 100644 => 100755 flagdata/quality_assessment/Bert/utils/data.py mode change 100644 => 100755 flagdata/quality_assessment/Bert/utils/encode.py mode change 100644 => 100755 flagdata/quality_assessment/FastText/data/cleared1.jsonl mode change 100644 => 100755 flagdata/quality_assessment/FastText/data/cn_stopwords.txt mode change 100644 => 100755 flagdata/quality_assessment/FastText/evaluate.py mode change 100644 => 100755 flagdata/quality_assessment/README.md mode change 100644 => 100755 flagdata/quality_assessment/README_zh.md mode change 100644 => 100755 flagdata/quality_assessment/quality_assessment.png mode change 100644 => 100755 flagdata_logo.png create mode 100644 pic/data_operator.png create mode 100644 pic/some_operator.png create mode 100644 pic/users.png mode change 100644 => 100755 pipeline.png mode change 100644 => 100755 pipeline_zh.png mode change 100644 => 100755 pyproject.toml mode change 100644 => 100755 quickstart/cleaner/run_cleaner.py mode change 100644 => 100755 quickstart/cleaner/run_custom_cleaner.py mode change 100644 => 100755 requirements.txt mode change 100644 => 100755 tests/test_cleaner.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5d5f6de --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +# Ignore the .idea directory +.idea/ + +# Build and Release Folders +bin-debug/ +bin-release/ +[Oo]bj/ +[Bb]in/ + +# Other files and folders +.settings/ + +# Executables +*.swf +*.air +*.ipa +*.apk + +# Project files, i.e. `.project`, `.actionScriptProperties` and `.flexProperties` +# should NOT be excluded as they contain compiler settings and other important +# information for Eclipse / Flash Builder. 
diff --git a/FlagOpen.png b/FlagOpen.png old mode 100644 new mode 100755 diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 index b7412e6..dc39a9d --- a/README.md +++ b/README.md @@ -2,7 +2,6 @@ ![FlagData](flagdata_logo.png) [![Pypi Package](https://img.shields.io/pypi/v/flagdata?label=pypi%20package)](https://pypi.org/project/flagdata/) -[![Python Application](https://github.com/FlagOpen/FlagData/actions/workflows/python-app.yml/badge.svg)](https://github.com/FlagOpen/FlagData/actions/workflows/python-app.yml) [![License](https://img.shields.io/github/license/FlagOpen/FlagData.svg?color=blue)](https://github.com/FlagOpen/FlagData/blob/main/LICENSE) ![GitHub release (release name instead of tag name)](https://img.shields.io/github/v/release/FlagOpen/FlagData?include_prereleases&style=social) @@ -30,7 +29,7 @@ The complete pipeline process and features such as ![pipeline](pipeline.png) ## News - +- [June 13th, 2024] FlagData v3.0.0 is released: it supports multiple data types, offers dozens of operators for DIY pipelines, and generates high-quality data with one click - [Dec 31st, 2023] FlagData v2.0.0 has been upgraded - [Jan 31st, 2023] FlagData v1.0.0 is online! @@ -49,10 +48,29 @@ The complete pipeline process and features such as - [Configuration](#Configuration) - [Data cleaning](#Data-cleaning) - [Data Quality assessment](#Data-Quality-assessment) -- [Contact us](#Contact-us) +- [Operator Pool](#Operator-Pool) +- [Strong community support](#Strong-community-support) +- [Users](#Users) - [Reference project](#Reference-project) - [License](#License) +# V3.0.0 UPDATE +Based on feedback from the community, FlagData has been upgraded. This update provides a set of easy-to-use tools for constructing language model pre-training data. For each data type we provide one-click data quality improvement tasks, such as Html, Text, Book, Arxiv and Qa, so both novice and advanced users can easily generate high-quality data. +- Novice users: Simply confirm the data type to generate high-quality data. +- Advanced users: We provide dozens of operators so users can assemble their own LLM pre-training data construction pipelines. + +**Project Features:** + +- Ease of use: Simple configuration is all that is needed to generate high-quality data. +- Flexibility: Advanced users can customize the data construction process with a rich pool of operators. +- Diversity: Supports multiple data types (HTML, Web, Wiki, Book, Paper, QA, Redpajama, Code) + +**Key highlights** + +- 🚀 Generate high-quality data with one click +- 🔧 Dozens of operators for DIY pipelines +- 🌐 Support for multiple data types + ## Installation - Under the requirements.txt file, are all the dependent packages of the FlagData project - ```bash pip install -r requirements.txt ``` -Optionally install the `cleaner` module required in FlagData. You will only install the dependency packages for the -corresponding modules, which is suitable for users who only want to use the `cleaner` module and do not want to install -other module dependency packages. - -```bash -pip install flagdata[cleaner] -``` - **Install the latest version of the main branch** The main branch is officially released by FlagData.
If you want to install / update to the latest version of the main @@ -76,14 +86,6 @@ branch, use the following command: ``` git clone https://github.com/FlagOpen/FlagData.git -pip install .[all] -``` - -**Secondary development based on source code** - -```bash -git clone https://github.com/FlagOpen/FlagData.git -pip install -r requirements.txt ``` ## Quick Start @@ -102,7 +104,7 @@ different strategies. The strategies include: answers. In order to increase the diversity of generated samples, it is supported to exclude already generated samples. -See [ReadMe under data_gen Module](flagdata/data_gen/README.md) for an example. +See [Instructions for using the data augmentation module](flagdata/data_gen/README.md) for an example. ### Data preparation phase @@ -115,7 +117,7 @@ Title [Chapter Title]", "Address [E-mail]","PageBreak", "Header [Header]", "Foot UncategorizedText [arxiv vertical number]", " Image, Formula, etc. Tool scripts provide two forms: keeping full text and saving by category resolution. -See [ReadMe under all2txt Module](flagdata/all2txt/README.md) for an example. +See [Instructions for using the all2txt module](flagdata/all2txt/README.md) for an example. ### Data preprocessing phase @@ -131,43 +133,33 @@ finally outputs a score of 0: 1. + For general cleaning rules, if it is greater than 0.5, it is classified as a specific language, otherwise it indicates that the page is not sure what language it is and discards the page. -See [ReadMe under language_identification Module](flagdata/language_identification/README.md) for an example. +See [Instructions for using the language identification module](flagdata/language_identification/README.md) for an example. #### Data cleaning -The cleaner module uses multi-process pool mp.Pool to process data in parallel in a multi-process manner. Use -SharedMemoryManager to create shareable data structures, and multiple processes share data in data processing. - -Efficient data cleaning is achieved through multi-processes and shared memory: +We provide one-click data quality improvement tasks such as Html, Text, Book, Arxiv, Qa, etc. For more customized functionality, users can refer to the "data_operator" section. +##### TextCleaner +TextCleaner is a fast and extensible text data cleaning tool that provides commonly used text cleaning modules. +Users only need to select the text_clean.yaml file in cleaner_builder.py to process text data. +For details, see [Instructions for using TextCleaner](flagdata/cleaner/docs/Text_Cleaner.md) -Currently, the following cleaning rules are included: +##### ArxivCleaner +ArxivCleaner provides a commonly used arxiv text data cleaning tool. +Users only need to select the arxiv_clean.yaml file in cleaner_builder.py to process arxiv data. -+ Emoticons and meaningless characters (regular) -+ Clean and reprint copyright notice information (Zhihu, csdn, brief book, blog park) -+ Remove unreasonable consecutive punctuation marks, and newline characters are unified as\ n -+ Remove personal privacy, URL and extra spaces such as mobile phone number and ID number -+ Remove irrelevant content such as beginning and end, and remove text whose length is less than n (currently nasty 100) -+ Convert simplified Chinese to traditional Chinese (opencc Library) +##### HtmlCleaner +HtmlCleaner provides commonly used Html format text extraction and data cleaning tools. +Users only need to run the main method to process Html data. -It takes only two steps to use the data cleaning feature of FlagData: - -1.
Modify the data path and format in the YAML configuration file. We give detailed comments on each parameter in the - configuration file template to explain its meaning. At the same time, you can refer - to [Configuration](#Configuration) Chapter. - -2. Specify the configuration file path in the following code and run it - ```python - from flagdata.cleaner.text_cleaner import DataCleaner - if __name__ == "__main__": # Safe import of main module in multi-process - cleaner = DataCleaner("config.yaml") - cleaner.clean() - ``` +##### QaCleaner +QaCleaner provides commonly used Qa format text extraction and data cleaning tools. +Users only need to run the main method to process Qa data. +For details, see [Instructions for using Qa](flagdata/cleaner/docs/Qa_Cleaner.md) -The cleaned file will be saved in the format `jsonl` to the path corresponding to the `output` parameter specified in -the configuration file. - -See [Tutorial 1: Clean the original text obtained from the Internet](/flagdata/cleaner/tutorial_01_cleaner.md) for an -example. +##### BookCleaner +BookCleaner provides commonly used book format text extraction and data cleaning tools. +Users only need to run the main method to process book data. +For details, see [Instructions for using Book](flagdata/cleaner/docs/Book_Cleaner.md) #### Quality assessment @@ -182,7 +174,7 @@ This paper compares different text classification models, including logical regr their performance. In the experiment, BERTEval and FastText models perform well in text classification tasks, and FastText model performs best in terms of accuracy and recall rate. [experimental results are from ChineseWebText] -See [ReadMe under quality_assessment Module](flagdata/quality_assessment/README.md) for an example. +See [Instructions for using the quality assessment module](flagdata/quality_assessment/README.md) for an example. #### Data deduplication @@ -196,6 +188,7 @@ to retain only those texts that are very similar, while discard those texts with default value is 0.87. At the same time, we use the distributed computing power of Spark to deal with large-scale data, the idea of MapReduce is used to remove duplicates, and tuned by spark to deal with large-scale text data sets efficiently. + The following is the similar text iterated in the process of data deduplication, which has slight differences in line wrapping and name editing, but the deduplication algorithm can identify two paragraphs of text that are highly similar. @@ -253,13 +246,13 @@ The analysis data analysis module provides the following functions: + length analysis of the text. -See [ReadMe under analysis Module](flagdata/analysis/README.md) for an example. +See [Instructions for using the analysis module](flagdata/analysis/README.md) for an example. ## Configuration For the `data cleansing` and `data quality assessment` modules, We provide a profile -template:[cleaner_config.yaml](https://dorc.baai.ac.cn/resources/projects/FlagData/cleaner_config.yaml), [bert_config.yaml](flagdata/quality_assessment/Bert/bert_config.yaml)。 +template: [text_clean.yaml and arxiv_clean.yaml](flagdata/cleaner/configs), [bert_config.yaml](flagdata/quality_assessment/Bert/bert_config.yaml). The configuration file is readable [YAML](https://yaml.org) format , provides detailed comments. Please make sure that the parameters have been modified in the configuration file before using these modules.
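The cleaner classes described above (TextCleaner, ArxivCleaner, HtmlCleaner, QaCleaner, BookCleaner) are all driven by such YAML configs through the `Cleaner` base class added in `flagdata/cleaner/base_cleaner.py` in this patch, which reads `input`, `output`, `source_key` and `result_key` from the `basic` section of the file. The following is a minimal sketch of a custom subclass wired up the same way; the subclass name, its whitespace-normalisation step and the config path are illustrative assumptions rather than part of this patch.

```python
# Minimal sketch (illustrative only): a custom cleaner built on the Cleaner
# base class from flagdata/cleaner/base_cleaner.py added in this patch.
import json
import re

from flagdata.cleaner.base_cleaner import Cleaner


class WhitespaceCleaner(Cleaner):
    def clean(self):
        # Cleaner.__init__ has already loaded input/output/source_key/result_key
        # from the `basic` section of the YAML config passed to the constructor.
        with open(self.input_path, "r", encoding="utf8") as fr:
            for line in fr:
                record = json.loads(line)
                text = record.get(self.source_key, "")
                # Hypothetical cleaning step: collapse runs of whitespace.
                record[self.result_key] = re.sub(r"\s+", " ", text).strip()
                self.write_jsonl_file(record)  # appends one JSON line to `output`


if __name__ == "__main__":
    # "my_clean.yaml" is a placeholder; shape it like the shipped configs/*.yaml templates.
    WhitespaceCleaner("my_clean.yaml").clean()
```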
@@ -268,10 +261,16 @@ Here are some important parameters you need to pay attention to: ### Data cleaning ```yaml - # Raw data to be cleaned + # Raw data to be cleaned input: ./demo/demo_input.jsonl - # Save path of data after cleaning + # Save path of the cleaned data output: ./demo/output.jsonl + # Field to be processed + source_key: text + # Key in the output file for saving the cleaned result + result_key: cleanedContent + # Pipeline class to use + cleaner_class: ArxivCleaner ``` ### Data Quality assessment @@ -283,20 +282,37 @@ Here are some important parameters you need to pay attention to: # The text_key field is the field being evaluated text_key: "raw_content" ``` +## Operator Pool +We provide a set of basic operators for data cleaning, filtering, format conversion, etc., to help users build their own data construction pipelines. + +The operators provided are divided into three types: Formatter, Pruner, and Filter. Formatter is used to process structured data and to convert between different data formats; Pruner is used to clean text data; Filter is used for sample filtering. +The figures below show where these operators sit in the processing pipeline, together with a list of some of the available operators. -## Contact us + <img src="pic/data_operator.png"> -If you have any questions about the use and code of this project, you can submit issue. At the same time, you can -contact us directly through data@baai.ac.cn. + <img src="pic/some_operator.png"> -An active community is inseparable from your contribution, if you have a new idea, welcome to join our community, let us -become a part of open source, together to contribute our own efforts for open source! +For a detailed description, see [Instructions for using the data operators](flagdata/data_operator/Operator.md) +## Strong community support +### Community Support +If you have any questions about the use and code of this project, you can submit an issue. You can also contact us directly via email at data@baai.ac.cn. +An active community depends on your contributions. If you have a new idea, you are welcome to join our community, become part of open source, and contribute to open source together! -Or follow Zhiyuan FlagOpen open source system, FlagOpen official website https://flagopen.baai.ac.cn/ +You can also follow the FlagOpen open source ecosystem on the FlagOpen official website: https://flagopen.baai.ac.cn/ ![contact_me](FlagOpen.png) +### Questions and Feedback +- Please report issues and make suggestions through GitHub Issues, and we will respond quickly within 24 hours. +- You are also welcome to join the discussion in GitHub Discussions. +- If GitHub is inconvenient for you, you can also speak freely in the FlagData open source community; reasonable suggestions will be incorporated in the next version. +- We will regularly invite domain experts to hold online and offline exchanges to share the latest LLM research results.
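To make the Formatter / Pruner / Filter split above concrete, here is a schematic, self-contained sketch of how pruners (which rewrite a sample's text) and filters (which decide whether a sample is kept) can be chained over JSONL-style records. The helper functions and the pipeline runner below are illustrative assumptions and do not reflect the actual `flagdata.data_operator` API; see `flagdata/data_operator/Operator.md` for the operators shipped in this patch.

```python
# Schematic sketch (illustrative only) of the Pruner and Filter roles.
import re
from typing import Callable, Dict, Iterable, List

Sample = Dict[str, str]


def link_pruner(sample: Sample) -> Sample:
    # Pruner role: rewrite the sample's text (here, strip URLs).
    sample["text"] = re.sub(r"https?://\S+", "", sample["text"])
    return sample


def text_length_filter(min_len: int) -> Callable[[Sample], bool]:
    # Filter role: decide whether the sample is kept at all.
    return lambda sample: len(sample["text"]) >= min_len


def run_pipeline(samples: Iterable[Sample],
                 pruners: List[Callable[[Sample], Sample]],
                 filters: List[Callable[[Sample], bool]]) -> List[Sample]:
    kept = []
    for sample in samples:
        for prune in pruners:
            sample = prune(sample)
        if all(keep(sample) for keep in filters):
            kept.append(sample)
    return kept


if __name__ == "__main__":
    data = [{"text": "see https://example.com for details on the dataset"},
            {"text": "too short"}]
    print(run_pipeline(data, [link_pruner], [text_length_filter(20)]))
```

A Formatter (such as the csv/json/parquet/tsv formatters under `flagdata/data_operator/formatter`) would typically run in front of such a pipeline to normalise the on-disk format before pruning and filtering.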
+## Users + <img src="pic/users.png"> + ## Reference project Part of this project is referenced from the following code: diff --git a/README_zh.md b/README_zh.md old mode 100644 new mode 100755 index ae2ba9d..da4de50 --- a/README_zh.md +++ b/README_zh.md @@ -2,7 +2,6 @@ ![FlagData](flagdata_logo.png) [![Pypi Package](https://img.shields.io/pypi/v/flagdata?label=pypi%20package)](https://pypi.org/project/flagdata/) -[![Python Application](https://github.com/FlagOpen/FlagData/actions/workflows/python-app.yml/badge.svg)](https://github.com/FlagOpen/FlagData/actions/workflows/python-app.yml) [![License](https://img.shields.io/github/license/FlagOpen/FlagData.svg?color=blue)](https://github.com/FlagOpen/FlagData/blob/main/LICENSE) ![GitHub release (release name instead of tag name)](https://img.shields.io/github/v/release/FlagOpen/FlagData?include_prereleases&style=social) @@ -25,7 +24,7 @@ FlagData支持以下特性: ![pipeline](pipeline_zh.png) ## 动态 - +- [June 13th, 2024] FlagData v3.0.0 update,支持多种数据类型,多达几十种算子池供DIY,一键生成高质量数据 - [Dec 31st, 2023] FlagData v2.0.0 升级 - [Jan 31st, 2023] FlagData v1.0.0 上线了! @@ -44,9 +43,28 @@ FlagData支持以下特性: - [配置](#3配置) - [数据清洗](#31数据清洗) - [数据质量评估](#32数据质量评估) -- [联系我们](#4联系我们) -- [参考项目](#5参考项目) -- [许可证](#6许可证) +- [算子池](#4算子池) +- [强有力的社区支持](#5强有力的社区支持) +- [使用用户](#6使用用户) +- [参考项目](#7参考项目) +- [许可证](#8许可证) + +# V3.0.0 UPDATE + 在社区运营的反馈下,FlagData迎来了功能升级。本次更新提供了一套傻瓜式的语言预训练数据构造工具。根据不同的数据类型,我们提供了例如 Html、Text、Book、Arxiv、Qa 等一键式数据质量提升任务。无论是小白用户还是进阶用户都能轻松生成高质量的数据。 +- 小白用户:只需确认数据类型,即可生成高质量数据。 +- 进阶用户:我们提供了几十种算子池,供用户DIY自己的LLM预训练数据构造过程。 + +**项目特点:** + +- 易用性:傻瓜式操作,只需简单配置即可生成高质量数据。 +- 灵活性:进阶用户可通过多种算子池自定义数据构造过程。 +- 多样性:支持多种数据类型(HTML、Web、Wiki、Book、Paper、QA、Redpajama、Code) + +**核心亮点** + +- 🚀 一键生成高质量数据 +- 🔧 多达几十种算子池供DIY +- 🌐 支持多种数据类型 ## 1、安装 @@ -56,26 +74,12 @@ FlagData支持以下特性: pip install -r requirements.txt ``` -选择性安装FlagData中所需的`cleaner`模块 。你将只会安装对应模块的依赖包,这适合那些只想使用`cleaner`模块且不想安装其他模块依赖包的使用者。 - -```bash -pip install flagdata[cleaner] -``` - **安装main分支的最新版本** main分支为FlagData正式发布的分支,如果你想安装/更新到main分支的最新版本,请使用以下命令: ``` git clone https://github.com/FlagOpen/FlagData.git -pip install .[all] -``` - -**基于源码二次开发** - -```bash -git clone https://github.com/FlagOpen/FlagData.git -pip install -r requirements.txt ``` ## 2、快速上手 @@ -89,7 +93,7 @@ pip install -r requirements.txt + AbilityExtractionGenerator: 利用LLM接口,归纳出若干案例样本中包含的能力。根据这个能力集合,生成新样本和答案。 + AbilityDirectGenerator: 根据指定的能力类型,或者任务类型,直接生成与该能力或任务相关的新样本。例如,指定能力为“逻辑推理”,则可生成一系列逻辑推理题目及答案。为增强生成样本的多样性,支持排除已生成样本。 -具体示例见[数据增强模块下的readMe](flagdata/data_gen/README_zh.md) +具体示例见[数据增强模块的使用说明](flagdata/data_gen/README_zh.md) ### 2.2、数据准备阶段 @@ -100,7 +104,7 @@ ListItem【参考文献】", " Title【章节标题】", "Address【邮箱地址】","PageBreak", "Header【页眉】", "Footer【页脚】", "UncategorizedText【arxiv竖排编号】", " Image(图)", "Formula(公式)" 等,工具脚本提供保留全文,以及按照类别解析保存两种形式。 -具体示例见[all2txt模块下的readMe](flagdata/all2txt/README_zh.md) +具体示例见[all2txt模块的使用说明](flagdata/all2txt/README_zh.md) ### 2.3、数据预处理阶段 @@ -114,40 +118,32 @@ Wikipedia、Tatoeba、SETimes + 对于每一个网页做一次语言分类,得到分类的分数。 + 对于一般清洗规则,如果大于 0.5,那么就分类为某个特定的语言,否则表示不确定是什么语言的网页并丢掉这个网页。 -具体示例见[language_identification模块下的readMe](flagdata/language_identification/README_zh.md) +具体示例见[language_identification模块的使用说明](flagdata/language_identification/README_zh.md) #### 2.3.2、数据清洗 - -  cleaner模块,使用多进程池 mp.Pool,通过多进程方式并行处理数据。使用 SharedMemoryManager -创建可共享的数据结构,在数据处理中多进程共享数据。 - -  通过多进程和共享内存的方式实现了高效的数据清洗: - -目前包含如下清洗规则: - -+ 表情符号和无意义字符(正则) -+ 清洗转载版权声明信息(知乎、csdn、简书、博客园) -+ 去除不合理的连续标点符号,换行符统一为\n -+ 去除手机号、身份证号等个人隐私、URL和额外的空格 -+ 去除开头、结尾等无关内容,去除长度小于n的文本(目前n=100) -+ 简体中文转换为繁体中文(opencc库) -
-使用FlagData的数据清洗功能仅需两步: - -1.修改YAML配置文件中的数据路径与格式。我们在配置文件模板中为每个参数给出了详细的注释来解释其含义。同时你也可以参考[配置](#配置) -章节。 - -2. 在以下代码中指定配置文件路径,运行即可 - ```python - from flagdata.cleaner.text_cleaner import DataCleaner - if __name__ == "__main__": # 多进程中主模块安全导入 - cleaner = DataCleaner("config.yaml") - cleaner.clean() - ``` - -清洗后的文件会以`jsonl`的格式保存到配置文件中指定的`output`参数对应的路径。 - -具体示例见[Tutorial 1: 清洗从互联网上获取到的原始文本](/flagdata/cleaner/tutorial_01_cleaner.md) +我们提供了例如 Html、Text、Book、Arxiv、Qa 等一键式数据质量提升任务。更多自定义的功能部分,用户可以参阅“data_operator”部分。 +##### TextCleaner +TextCleaner提供了一个快速且可扩展的文本数据清理工具。它提供常用的文本清理模块。 +使用者仅需在cleaner_builder.py选择 text_clean.yaml文件,便可进行text数据的处理。 +具体说明见[TextCleaner 的使用说明](flagdata/cleaner/docs/Text_Cleaner.md) + +##### ArxivCleaner +ArxivCleaner提供了常用的arxiv文本数据清理工具。 +使用者仅需在cleaner_builder.py选择 arxiv_clean.yaml文件,便可进行arxiv数据的处理。 + +##### HtmlCleaner +HtmlCleaner提供了常用的Html格式的文本抽取、数据清理工具。 +使用者仅需运行main方法,便可进行Html数据的处理。 + +##### QaCleaner +QaCleaner提供了常用的Qa格式的文本抽取、数据清理工具。 +使用者仅需运行main方法,便可进行Qa数据的处理。 +具体说明见[Qa 的使用说明](flagdata/cleaner/docs/Qa_Cleaner_ZH.md) + +##### BookCleaner +BookCleaner提供了常用的Book格式的文本抽取、数据清理工具。 +使用者仅需运行main方法,便可进行Book数据的处理。 +具体说明见[Book 的使用说明](flagdata/cleaner/docs/Book_Cleaner_ZH.md) #### 2.3.3、质量评估 @@ -158,13 +154,14 @@ Wikipedia、Tatoeba、SETimes 文章比较了不同的文本分类模型,包括逻辑回归、BERT和FastText,以评估它们的性能。在实验中,BERTEval和FastText模型在文本分类任务中表现良好,其中FastText模型在精度和召回率方面表现最佳。【实验结果来自ChineseWebText】 -具体示例见[quality_assessment模块下的readMe](flagdata/quality_assessment/README_zh.md) +具体示例见[quality_assessment模块的使用说明](flagdata/quality_assessment/README_zh.md) #### 2.3.4、数据去重   deduplication模块下,提供海量文本数据去重能力,该阶段使用的是MinHashLSH(最小哈希局部敏感哈希)通过将文本转换为一系列哈希值,以便比较文本之间的相似性。   我们可以通过控制参数threshold,它代表了相似性的阈值,threshold值的范围是从0到1。设置为1时意味着完全匹配,任何文本都不会被过滤掉。相反,如果设置了较低的threshold值,相似性稍微高一些的文本也会被保留,我们可以根据需要设置更高的threshold值,以便只保留那些非常相似的文本,而丢弃那些相似性稍微低一些的文本,经验默认值为0.87;同时我们利用了Spark的分布式计算能力处理大规模数据,使用了MapReduce思想来实现去重,同时经spark调优,来高效地处理大规模文本数据集。 + 如下是在数据去重过程中迭代计算的相似文本,该文本在换行、编辑姓名等方面有细微区别,但是去重算法可以识别出两段文本高度相似。 ```json lines @@ -196,7 +193,9 @@ Spark 的原生算子来进行处理。 deduplication模块下提供了普通Python函数(判断是否是其他字符串的子字符串)使用spark udf的改写,可以方便的使用spark分布式能力,详细请见`stringMatching.py`和`udf_spark_stringMatching.py`的对比 -如果用户只是单单将python函数改成spark任务,如果没有spark集群是不行的。这里详细的写了傻瓜式搭建集群的文档,方便小白用户使用。具体示例见[spark集群搭建](flagdata/deduplication/README_zh.md) +如果用户只是单单将python函数改成spark任务,如果没有spark集群是不行的。这里详细的写了傻瓜式搭建集群的文档,方便小白用户使用。 + +具体示例见[spark集群搭建](flagdata/deduplication/README_zh.md) ### 2.4、数据分析阶段 @@ -209,12 +208,12 @@ analysis数据分析模块提供如下功能: (4)文本的长度分析 -具体详细示例见[analysis模块下的readMe](flagdata/analysis/README_zh.md) +具体详细示例见[analysis模块的使用说明](flagdata/analysis/README_zh.md) ## 3、配置 针对`数据清洗`、`数据质量评估`模块, -我们提供了配置文件模板:[cleaner_config.yaml](https://dorc.baai.ac.cn/resources/projects/FlagData/cleaner_config.yaml), [bert_config.yaml](flagdata/quality_assessment/Bert/bert_config.yaml)。 +我们提供了配置文件模板:[text_clean.yaml、arxiv_clean.yaml](flagdata/cleaner/configs), [bert_config.yaml](flagdata/quality_assessment/Bert/bert_config.yaml)。 配置文件为易读的 [YAML](https://yaml.org) 格式,并提供了详尽的注释。使用这些模块前请确认已经在配置文件中修改好相应参数。 以下是一些你需要注意的重要参数: @@ -226,6 +225,12 @@ analysis数据分析模块提供如下功能: input: ./demo/demo_input.jsonl # 清洗后数据的保存路径 output: ./demo/output.jsonl + # 待处理的字段 + source_key: text + # key in the output file for saving + result_key: cleanedContent + # 需要选择的Pipeline类 + cleaner_class: ArxivCleaner ``` ### 3.2、数据质量评估 @@ -238,28 +243,50 @@ analysis数据分析模块提供如下功能: text_key: "raw_content" ``` -## 4、联系我们 +## 4、算子池 +我们提供了一些用于数据清洗,过滤,格式转换等的基本算子,帮助用户构建自己的数据构建流程。 +
+提供的算子分为三种:Formatter、Pruner、Filter。Formatter用于处理结构化数据,可以用于不同格式数据的相互转换;Pruner用于清洗文本数据;Filter用于样本过滤。 +下图是这些算子位于不同的处理位置以及其中一些operator的列表 + + + + +具体详细说明见[data_operator的使用说明](flagdata/data_operator/Operator_ZH.md) + +## 5、强有力的社区支持 +### 社区支持 如果你对本项目的使用和代码有任何问题,可以提交issue。同时你也可以通过邮箱 data@baai.ac.cn 直接联系我们; 一个活跃的社区离不开你的贡献,如果你有新的idea,欢迎加入我们的社区,让我们成为开源的一部分,一起为开源贡献自己的力量!!! + 或者关注智源 FlagOpen 开源体系,FlagOpen 官网 https://flagopen.baai.ac.cn/ ![contact_me](FlagOpen.png) -## 5、参考项目 +### 问题和反馈 +- 请通过GitHub Issues 来反馈问题和提出建议,我们将在24h内快速响应。 +- 同时也欢迎大家在GitHub Discussions积极讨论。 +- 如果不方便使用GitHub ,当然FlagData开源社区大家也可以畅所欲言,对于合理的建议,我们会在下一个版本中进行迭代。 +- 我们会邀请领域专家 定期举办线上、线下交流会 分享最新的LLM的研究成果。 +## 6、使用用户 + + + +## 7、参考项目 本项目部分参考自以下代码: -[GeneralNewsExtractor](https://github.com/GeneralNewsExtractor/GeneralNewsExtractor), -[text-data-distillation](https://github.com/arumaekawa/text-dataset-distillation), -[emoji](https://github.com/carpedm20/emoji), -[transformers](https://github.com/huggingface/transformers)。 +[GeneralNewsExtractor](https://github.com/GeneralNewsExtractor/GeneralNewsExtractor) +[text-data-distillation](https://github.com/arumaekawa/text-dataset-distillation) +[emoji](https://github.com/carpedm20/emoji) +[transformers](https://github.com/huggingface/transformers) [ChineseWebText](https://github.com/CASIA-LM/ChineseWebText) [lid](https://github.com/facebookresearch/cc_net) [unstructured](https://github.com/Unstructured-IO/unstructured) [minHash](https://github.com/ChenghaoMou/text-dedup) -## 6、许可证 +## 8、许可证 FlagData项目基于 [Apache 2.0 协议](LICENSE)。 diff --git a/config/cleaner_config.yaml b/config/cleaner_config.yaml old mode 100644 new mode 100755 diff --git a/contact_me.png b/contact_me.png old mode 100644 new mode 100755 diff --git a/dedup.png b/dedup.png old mode 100644 new mode 100755 diff --git a/dist/flagdata-1.0.0-py3-none-any.whl b/dist/flagdata-1.0.0-py3-none-any.whl old mode 100644 new mode 100755 diff --git a/dist/flagdata-1.0.0.tar.gz b/dist/flagdata-1.0.0.tar.gz old mode 100644 new mode 100755 diff --git a/flagdata/__init__.py b/flagdata/__init__.py old mode 100644 new mode 100755 diff --git a/flagdata/all2txt/README.md b/flagdata/all2txt/README.md old mode 100644 new mode 100755 diff --git a/flagdata/all2txt/README_zh.md b/flagdata/all2txt/README_zh.md old mode 100644 new mode 100755 diff --git a/flagdata/all2txt/__init__.py b/flagdata/all2txt/__init__.py old mode 100644 new mode 100755 diff --git a/flagdata/all2txt/epub2txt.py b/flagdata/all2txt/epub2txt.py old mode 100644 new mode 100755 diff --git a/flagdata/all2txt/pdf2txt.py b/flagdata/all2txt/pdf2txt.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/README.md b/flagdata/analysis/README.md old mode 100644 new mode 100755 diff --git a/flagdata/analysis/README_zh.md b/flagdata/analysis/README_zh.md old mode 100644 new mode 100755 diff --git a/flagdata/analysis/__init__.py b/flagdata/analysis/__init__.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/average_rotation_analysis.py b/flagdata/analysis/average_rotation_analysis.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/data/average_rounds_data.jsonl b/flagdata/analysis/data/average_rounds_data.jsonl old mode 100644 new mode 100755 diff --git a/flagdata/analysis/data/language_distribution_data.jsonl b/flagdata/analysis/data/language_distribution_data.jsonl old mode 100644 new mode 100755 diff --git a/flagdata/analysis/draw_pie_chart.py b/flagdata/analysis/draw_pie_chart.py old mode 100644 new mode 100755 diff --git 
a/flagdata/analysis/field_distribution_analysis.py b/flagdata/analysis/field_distribution_analysis.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/language_distribution_analysis.py b/flagdata/analysis/language_distribution_analysis.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/nested_pie_chart.py b/flagdata/analysis/nested_pie_chart.py old mode 100644 new mode 100755 diff --git a/flagdata/analysis/png/field_distribution_analysis.gif b/flagdata/analysis/png/field_distribution_analysis.gif old mode 100644 new mode 100755 diff --git a/flagdata/analysis/png/language_distribution_analysis.png b/flagdata/analysis/png/language_distribution_analysis.png old mode 100644 new mode 100755 diff --git a/flagdata/analysis/text_length_analysis.py b/flagdata/analysis/text_length_analysis.py old mode 100644 new mode 100755 diff --git a/flagdata/cleaner/__init__.py b/flagdata/cleaner/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/flagdata/cleaner/arxiv_cleaner.py b/flagdata/cleaner/arxiv_cleaner.py new file mode 100644 index 0000000..ed74c6f --- /dev/null +++ b/flagdata/cleaner/arxiv_cleaner.py @@ -0,0 +1,101 @@ +import re +from flagdata.cleaner.base_cleaner import Cleaner +from typing import Dict +import json +import logging +from tqdm import tqdm + + +class ArxivCleaner(Cleaner): + def __init__(self, cleaning_steps): + super().__init__(cleaning_steps) + # 可以添加更多Arxiv特有的初始化逻辑 + + def clean(self): + print("arxiv_cleaner start......") + with open(self.input_path, "r", encoding="utf8") as fr: + try: + for line in tqdm(fr.readlines()): + text = json.loads(line.strip()) + source_text = text[self.source_key] + for func_name in self.config.get('ArxivCleaner', []): + if hasattr(self, func_name): + result_text = getattr(self, func_name)(source_text) + text[self.result_key] = result_text + source_text = result_text + self.write_jsonl_file(text) + except Exception as e: + logging.warning("read error: ", e) + + # 实现 Arxiv 清洗逻辑 + def _clean_tex_file(self, file_content: str, arg_macros: Dict, non_arg_macros: Dict) -> str: + pattern = r"^(.*?)(" + pattern += r"\\\bchapter\b\*?(?:\[(.*?)\])?\{(.*?)\}|" + pattern += r"\\\bpart\b\*?(?:\[(.*?)\])?\{(.*?)\}|" + pattern += r"\\\bsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|" + pattern += r"\\\bsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|" + pattern += r"\\\bsubsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|" + pattern += r"\\\bparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}" + pattern += r"\\\bsubparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}" + pattern += r")" + + if not re.search(pattern, file_content, flags=re.DOTALL): + return "" + + file_content = re.sub(pattern=pattern, repl=r"\2", string=file_content, flags=re.DOTALL) + + file_content = re.sub(pattern=r"(?m)^%.*\n?", repl=r"", string=file_content, flags=re.MULTILINE) + + file_content = re.sub(pattern=r"[^\\]%.+$", repl=r"", string=file_content, flags=re.MULTILINE) + + pattern = r"(" + pattern += r"\\appendix|" + pattern += r"\\begin\{references\}|" + pattern += r"\\begin\{REFERENCES\}|" + pattern += r"\\begin\{thebibliography\}|" + pattern += r"\\bibliography\{.*\}" + pattern += r").*$" + + file_content = re.sub(pattern=pattern, repl=r'', string=file_content, flags=re.DOTALL) + + for macro_name, macro_value in non_arg_macros.items(): + macro_name_escaped = re.escape(macro_name) + + try: + file_content = re.sub(pattern=r"(" + macro_name_escaped + r")" + r"([^a-zA-Z0-9])", + repl=macro_value + r"\2", string=file_content) + except re.error as e: + print("Error occurred while processing:", e) + 
print("Problematic content:", macro_name_escaped) + + # file_content = re.sub(pattern=r"(" + macro_name + r")" + r"([^a-zA-Z0-9])", + # repl=macro_value + r"\2", string=file_content) + + for macro_name, macro_value in arg_macros.items(): + pass + + return file_content + + def _build_non_arg_macros_dict(self, file_content: str) -> Dict[str, str]: + non_arg_nc_reg = re.compile( + pattern=r'\\\bnewcommand\b\*?\{(\\[a-zA-Z0-9]+?)\}\{(.*?)\}$', + flags=re.MULTILINE + ) + + non_arg_def_reg = re.compile( + pattern=r'\\def\s*(\\[a-zA-Z0-9]+?)\s*\{(.*?)\}$', + flags=re.MULTILINE + ) + + macros = {} + for reg in [non_arg_nc_reg, non_arg_def_reg]: + for match in reg.finditer(file_content): + macro_name = match.group(1).encode("unicode-escape").decode("utf-8") + macro_val = match.group(2).encode("unicode-escape").decode("utf-8") + macros[macro_name] = macro_val + + return macros + + def process_text(self, text): + non_arg_macros = self._build_non_arg_macros_dict(text) + return self._clean_tex_file(text, {}, non_arg_macros) diff --git a/flagdata/cleaner/base_cleaner.py b/flagdata/cleaner/base_cleaner.py new file mode 100644 index 0000000..81d5288 --- /dev/null +++ b/flagdata/cleaner/base_cleaner.py @@ -0,0 +1,69 @@ +import yaml +from abc import ABC, abstractmethod +import logging +import json + + +class Cleaner(ABC): + """ + 清洗类抽象基类 + 1.每个子类需要重写__init__、clean 两个函数,子类有相同逻辑的可以调用父类方法 + 2.run()方法无需重写,run()负责调度 clean、dedup、language_identification,供外部主逻辑调用 + 3.异常处理:统一抛出去,在主干流程内进行捕获并触发机器人报警、打错误日志 + + 后续存在一些公共方法,再提到此处 + """ + + @staticmethod + def _read_config(config_path: str): + with open(config_path, "r", encoding="utf8") as fr: + return yaml.safe_load(fr) + + def __init__(self, config_path="configs/default_clean.yaml"): + """ + 初始化清洗器,接收一个清洗步骤的配置。 + """ + # 初始化一个空列表用于存储值为 True 的键 + self.config = self._read_config(config_path) + logging.info(self.config) + print(self.config) + self.input_path = self.config["basic"].get("input") + self.output_path = self.config["basic"].get("output") + self.source_key = self.config['basic'].get("source_key") + self.result_key = self.config['basic'].get("result_key") + + @abstractmethod + def clean(self): + pass + + """ + 添加子类需要实现自己的clean + """ + + def run(self): + # todo:串联clean、dedup、language_identification + pass + + def read_jsonl_file(self): + with open(self.input_path, "r", encoding="utf8") as fr: + try: + for line in fr: + text = json.loads(line.strip()) + except Exception as e: + logging.warning("read error: ", e) + return text + + def write_jsonl_file(self, text: str): + """ + 写 + :return: + """ + with open(self.output_path, "a", encoding="utf8") as fw: + try: + fw.write(json.dumps(text, ensure_ascii=False) + "\n") + except Exception as e: + logging.warning("write error: ", e) + + +if __name__ == '__main__': + cleaner = Cleaner() diff --git a/flagdata/cleaner/book_cleaner.py b/flagdata/cleaner/book_cleaner.py new file mode 100644 index 0000000..a5f2bd1 --- /dev/null +++ b/flagdata/cleaner/book_cleaner.py @@ -0,0 +1,437 @@ +import re, time +import logging, os +import json +import chardet +import langdetect +from functools import partial +import shutil +from shutil import copy +from collections import defaultdict +import traceback +import math +import zipfile +from bs4 import BeautifulSoup +from langdetect import detect +import subprocess + +from flagdata.cleaner.utils.time_formatter import timeout + + +class BookCleaner(): + def __init__(self, config_path, workspace, target_path): + super().__init__() + base_workspace = os.path.join(workspace, 'book_temp_dir') + + 
temp_index = 1 + self.workspace = base_workspace + f'{temp_index}' + while os.path.exists(self.workspace): + temp_index += 1 + self.workspace = base_workspace + f'{temp_index}' + + os.makedirs(self.workspace) + logging.info(f"Created directory: {self.workspace}") + + self.target_path = target_path + self.save_index = 0 + try: + with open(config_path, 'r') as config_file: + book_config = json.load(config_file) + self.KEYWORDS_SET = book_config["KEYWORDS_SET"] + self.KEYWORDS_BY_PARTS = book_config["KEYWORDS_BY_PARTS"] + except Exception as e: + logging.error(f"load book_config failed: {str(e)}") + + @timeout(120) + def convert_ebook(self, input_file, converted_file): + try: + command = ['ebook-convert', input_file, converted_file] + subprocess.run(command, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + except subprocess.CalledProcessError as e: + print(f"Error during conversion: {e}") + + # check input file fomart: epub/azw/mobi + def convert_file_2epub(self, input_path, book_name): + if not (input_path.endswith('.epub') or input_path.endswith('.azw') or input_path.endswith( + '.azw3') or input_path.endswith('.mobi')): + logging.error(f"Invalid file extension for file: {input_path}. Expected .epub, .azw, .azw3 , or .mobi") + raise ValueError("Invalid file extension. The file must be an .epub, .azw, .azw3 or .mobi file.") + + if input_path.endswith('.azw') or input_path.endswith('.azw3') or input_path.endswith('.mobi'): + # Convert to epub file and store it in workspace + epub_path = os.path.join(self.workspace, f'{book_name}.epub') + + try: + if not os.path.exists(epub_path): + # Redirect standard output and standard error to DEVNULL + subprocess.run(['ebook-convert', input_path, epub_path], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=300) + else: + logging.info(f"{epub_path} already exists.") + except Exception as e: + logging.error(f"Error during conversion: {str(e)}") + else: + epub_path = input_path + + return epub_path + + def unzip_epub(self, zip_path, unzip_path): + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_ref.extractall(unzip_path) + + def zip_epub(self, unzip_path, epub_path): + with zipfile.ZipFile(epub_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + for root, dirs, files in os.walk(unzip_path): + for file in files: + file_path = os.path.join(root, file) + zipf.write(file_path, os.path.relpath(file_path, unzip_path)) + + def find_html_files(self, path): + html_files = [] + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith('.html') or file.endswith('.xhtml'): + html_files.append(os.path.join(root, file)) + return html_files + + def process_html_file(self, html_file): + try: + soup = BeautifulSoup(open(html_file, 'r'), 'html.parser') + except: + encoding = self.detect_encoding(html_file) + soup = BeautifulSoup(open(html_file, 'r', encoding=encoding, errors='ignore'), 'html.parser') + + # Rule 1, if there is a picture and a paragraph in the div, delete the paragraph (delete the caption of the picture) + for div in soup.find_all('div'): + if div.find('img') and len(div.find_all('p')) == 1: + logging.info(f'{str(div)} is deleted form epub file') + div.find('p').decompose() + + ''' + Rule 2, if total text length in >= that in
<span> >= that in <p>
, It is considered that this page is most likely not the main + text content. By deleting the text in the span, the directory, copyright, cover, author introduction and other meta + information are deleted (using 3000 as the threshold to exclude the case where the main text is directly written in + ) + ''' + + total_span_text_length = sum(len(span.get_text()) for span in soup.find_all('span')) + total_p_text_length = sum(len(p.get_text()) for p in soup.find_all('p')) + if total_span_text_length >= total_p_text_length: + if total_span_text_length < 3000: + for span in soup.find_all('span'): + print(span.get_text()) + for span in soup.find_all('span'): + span.decompose() + return + + def process_epub(self, input_path): + workspace = self.workspace + zip_path = os.path.join(workspace, 'temp.zip') + unzip_path = os.path.join(workspace, 'temp') + epub_path = os.path.join(workspace, 'temp.epub') + + try: + if os.path.exists(unzip_path): + shutil.rmtree(unzip_path) + logging.info(f"Directory {unzip_path} has been removed.") + + shutil.copy(input_path, zip_path) + logging.info(f"File from {input_path} has been copied to {zip_path}.") + except Exception as e: + logging.error(f"An error occurred: {e}") + + os.makedirs(unzip_path, exist_ok=True) + + self.unzip_epub(zip_path, unzip_path) + html_files = self.find_html_files(unzip_path) + + for html_file in html_files: + self.process_html_file(html_file) + + self.zip_epub(unzip_path, epub_path) + + return epub_path + + def detect_encoding(self, file_path): + with open(file_path, 'rb') as f: + raw_data = f.read() + return chardet.detect(raw_data)['encoding'] + + def content_length_lower_ratio(self, lines, threshold): + lens = [self.count_characters(line) for line in lines if self.count_characters(line) != 0] + low_count = sum(1 for x in lens if x <= threshold) + if len(lens) != 0: + return low_count / len(lens) + else: + return 0 + + def count_characters(self, text): + # Calculate the number of Chinese characters + chinese_count = len(re.findall(r'[\u4e00-\u9fff]', text)) + # Count the number of English words + english_words = re.findall(r'\b[a-zA-Z]+\b', text) + english_count = len(english_words) + return chinese_count + english_count + + def remove_excess_newlines(self, content): + return re.sub(r'\n{3,}', '\n\n', content) + + def remove_consecutive_stars(self, content): + return re.sub(r'\*{2,}', '', content) + + def check_part(self, parts): + + def has_keyword(lines, keywords, max_count): + for line in lines: + if any(keyword in line for keyword in keywords) and self.count_characters(line) < max_count: + return True + return False + + def evaluate_part(part): + lines = part.split('\n') + checks = [] + for keyword in self.KEYWORDS_BY_PARTS: + checks.append(has_keyword(lines, keyword["keywords"], keyword["threshold"])) + if self.content_length_lower_ratio(lines, 100) != 1: + checks.append(False) + elif self.content_length_lower_ratio(lines, 40) >= 0.8: + checks.append(True) + else: + checks.append(False) + + # Only parts with less than 5000 characters can be deleted + return any(checks) and self.count_characters(part) <= 5000 + + evaluate_results = [evaluate_part(part) for part in parts] + + total_length = sum(len(part.split('\n')) for part in parts) + # Calculate the cumulative length of each part to determine its position in the total text + cumulative_length = 0 + length_results = [] + + for part in parts: + if cumulative_length + len( + part.split('\n')) <= total_length * 0.2 or cumulative_length >= total_length * 0.8: + length_results.append(True) 
+ else: + length_results.append(False) + cumulative_length += len(part.split('\n')) + + should_delete_ls = [evaluate_result and length_result for evaluate_result, length_result in + zip(evaluate_results, length_results)] + parts = [part for part, should_delete in zip(parts, should_delete_ls) if not should_delete] + + return parts + + def pre_process_zh(self, content, lines): + parts = re.split(r'\n{4,}', content) + parts = self.check_part(parts) + content = '\n\n'.join(parts) + content = self.remove_excess_newlines(content) + content = self.remove_consecutive_stars(content) + lines = content.split('\n') + + return lines + + def clean_by_lines(self, lines, keywords_set): + def apply_rules(keyword, line, index, total_lines): + # 只有前 ratio_s %和后 ratio_e% 的行会被删除 + key, length, is_lower, ratio_s, ratio_e = keyword + tar_line = line.lower().strip() if is_lower else line.strip() + key = key.lower() if is_lower else key + + if (length != 0 and len(line) < length and key in tar_line) or (tar_line == key): + start_index = index if index < total_lines * ratio_s else None + end_index = index if index >= math.ceil(total_lines * (1 - ratio_e)) else None + return start_index, end_index + return (None, None) + + figure_pattern = re.compile(r"^(图|表|Figure|Table)\s*\d+(-\d+)?(\.\d+)?\s*[\u4e00-\u9fa5A-Za-z]*$", + re.IGNORECASE) + + lines = [line for line in lines if not figure_pattern.fullmatch(line)] + + del_keys = ['ePUBw.COM', '电子书下载'] + lines = [line for line in lines if not any(keyword in line for keyword in del_keys)] + + total_lines = len(lines) + + early_index = None + late_index = None + + for index, line in enumerate(lines): + lower_line = line.lower() + line_len = self.count_characters(line) + + # Check for lines starting with a specific start + if 'chapter' in lower_line: + digits = lower_line[lower_line.find('chapter') + len('chapter'):] + matches = re.search(r'\d[\d\s]*\d|\d', digits) + + if matches: # Check if a number was found + digits = re.sub(r'\s+', '', matches.group(0)) + else: + digits = '' # If no digits are found, set digits to an empty string + cleaned_text = re.sub(r'[^a-zA-Z]', '', lower_line) + + if digits == '1' or cleaned_text == 'chapterone': # If "1" is in the number + # Determine whether the number of rows in this row is in the top 30% of the total number of rows + if index < total_lines * 0.3 and line_len < 10: + early_index = index + + if line.strip() in ['I .', 'I.', 'I', ] or lower_line.strip() == 'chapter i' or 'chapter i ' in lower_line: + if index < total_lines * 0.3 and line_len < 20: + early_index = index + + for keyword in keywords_set: + early_candidate, late_candidate = apply_rules(keyword, line, index, total_lines) + + if early_candidate is not None and (early_index is None or early_candidate + 1 > early_index): + early_index = early_candidate + 1 + + if late_index is None and late_candidate is not None: + late_index = late_candidate + + if early_index == None: + early_index = 0 + if late_index == None: + late_index = total_lines + + lines = lines[early_index: late_index] + + lines_to_delete = set() # Store the index of the row to be deleted + lines_length = [self.count_characters(line) for line in lines] + # consecutive + consecutive_count = 0 # The number of rows that meet the conditions + temp = set() + + for i, line in enumerate(lines): + if lines_length[i] < 100 and (('第' in line and '章' in line) or ('第' in line and '幕' in line) or ( + 'chapter' in line.lower()) or re.match(r'^\d+\.', line.strip()) or ('............' 
in line)): + # Increase the consecutive row count + consecutive_count += 1 + temp.add(i) + if consecutive_count >= 3: + lines_to_delete.update(temp) + elif line in ['', '\n']: # + consecutive_count += 0 + temp.add(i) + else: + consecutive_count = 0 + temp.clear() + + if lines_length[i] < 50 and ( + '.com' in line.lower() or 'www.' in line.lower() or '@' in line.lower() or line[:2] == '##'): + lines_to_delete.add(i) + + # Generate a new list, excluding the rows to be deleted, but keeping the empty rows + lines = [line for i, line in enumerate(lines) if i not in lines_to_delete] + return lines + + def short_line_ratio(self, lines, length_threshold): + count_less = 0 + count_more = 0 + + for line in lines: + line = line.strip() + num_characters = self.count_characters(line) + # Check the end character and number of Chinese characters in the line to classify the count + if num_characters > length_threshold: + count_more += 1 + else: + count_less += 1 + + # Calculate the percentage of lines with less than 50 Chinese characters + total_lines = len(lines) + ratio = count_less / total_lines if total_lines > 0 else 1 + if count_more > 20: + return 0 + else: + return ratio + + def write_jsonl_file(self, content): + with open(self.target_path, 'a') as output_file: + line = json.dumps({"content": content}, ensure_ascii=False) + output_file.write(line + '\n') + + # with open(self.target_path + f'/{self.save_index}.txt', 'w') as output_file: + # output_file.write(content) + # print(self.save_index) + self.save_index += 1 + + def clean(self, input_path): + # 清洗前清空 workspace + workspace = self.workspace + + if os.path.exists(workspace): + shutil.rmtree(workspace) + print(f"Directory removed: {workspace}") + os.makedirs(workspace) + print("book_cleaner start......") + + book_name = os.path.basename(input_path) + book_name, _ = os.path.splitext(book_name) + out_txt_path = os.path.join(workspace, f'{book_name}.txt') + + try: + input_path = self.convert_file_2epub(input_path, book_name) + except Exception as e: + logging.error("convert_file_2epub error: ", e) + return None + + try: + epub_path = self.process_epub(input_path) + except Exception as e: + logging.error("process_epub error: ", e) + return None + + try: + self.convert_ebook(epub_path, out_txt_path) + except Exception as e: + logging.warning("convert_ebook failed: ", e) + return None + + try: + encoding = self.detect_encoding(out_txt_path) + with open(out_txt_path, 'r', encoding=encoding) as file: + content = file.read() + language = langdetect.detect(content) + file.seek(0) + lines = file.readlines() + except Exception as e: + logging.warning("open_ebook failed: ", e) + return None + + if not (language == 'zh-cn' or language == 'en'): + logging.info("Languages other than zh-cn and en") + return None + else: + if language == 'zh-cn' and self.short_line_ratio(lines, 100) > 0.9: + logging.info("not enough long lines") + return None + elif language == 'en' and self.short_line_ratio(lines, 150) > 0.9: + logging.info("not enough long lines") + return None + else: + lines = self.pre_process_zh(content, lines) if language == 'zh-cn' else lines + lines = self.clean_by_lines(lines, self.KEYWORDS_SET) + + content = '\n'.join(lines) + self.write_jsonl_file(content) + + +if __name__ == '__main__': + test_directory = 'input/book_demo_data' + target_path = 'output/book_demo_output.jsonl' + + config_path = 'configs/book_config.json' + + book_cleaner = BookCleaner(config_path=config_path, + workspace='input/book_demo_temp', + target_path=target_path) + + books = 
[os.path.join(test_directory, filename) for filename in os.listdir(test_directory)]
+    for book_path in books:
+        book_cleaner.clean(book_path)
diff --git a/flagdata/cleaner/cleaner_builder.py b/flagdata/cleaner/cleaner_builder.py
new file mode 100644
index 0000000..6a94f58
--- /dev/null
+++ b/flagdata/cleaner/cleaner_builder.py
@@ -0,0 +1,45 @@
+from flagdata.cleaner.base_cleaner import Cleaner
+import yaml
+import importlib
+
+from flagdata.cleaner.utils.string_utils import is_camel_case, camel_to_snake
+
+
+class CleanerBuilder:
+    @staticmethod
+    def build(cleaning_steps_yaml: str) -> Cleaner:
+
+        # Load the YAML configuration file
+        with open(cleaning_steps_yaml, 'r') as file:
+            cleaning_steps_config = yaml.safe_load(file)
+        # Read the cleaner class name specified in the configuration file
+        cleaner_class_name = cleaning_steps_config["basic"].get("cleaner_class")
+
+        if is_camel_case(cleaner_class_name):
+            cleaner_class_name_path = 'flagdata.cleaner.' + camel_to_snake(cleaner_class_name)
+
+        else:
+            raise ValueError(f"{cleaner_class_name} is not CamelCase")
+        # Create the cleaner instance
+        try:
+            module = importlib.import_module(cleaner_class_name_path)
+        except ImportError:
+            print(f"Module {cleaner_class_name_path} not found.")
+            return None
+        try:
+            cleaner_class = getattr(module, cleaner_class_name)
+
+            # Instantiate the class; its clean method is invoked by the caller
+            cleaner_instance = cleaner_class(cleaning_steps_yaml)
+        except AttributeError:
+            print(f"Cleaner class {cleaner_class_name} not found in module {cleaner_class_name_path}.")
+            return None
+        # Other exceptions raised during object initialization are not caught here; the caller handles them
+        return cleaner_instance
+
+
+if __name__ == '__main__':
+    # cleaning_steps_yaml = 'configs/default_clean.yaml'
+    cleaning_steps_yaml = 'configs/arxiv_clean.yaml'
+    # cleaning_steps_yaml = 'configs/text_clean.yaml'
+    CleanerBuilder.build(cleaning_steps_yaml).clean()
diff --git a/flagdata/cleaner/configs/arxiv_clean.yaml b/flagdata/cleaner/configs/arxiv_clean.yaml
new file mode 100644
index 0000000..23a4d7f
--- /dev/null
+++ b/flagdata/cleaner/configs/arxiv_clean.yaml
@@ -0,0 +1,14 @@
+basic:
+  # input file path
+  input: input/arxiv_demo_input.jsonl
+  # save file path
+  output: output/arxiv_demo_output.jsonl
+  # we support jsonl or plain-text input; if the input is plain text lines, set this to false
+  is_jsonl: true
+  # key in the input file corresponding to the value to extract/clean
+  source_key: text
+  # key in the output file for saving
+  result_key: cleanedContent
+  cleaner_class: ArxivCleaner
+ArxivCleaner:
+  process_text:
diff --git a/flagdata/cleaner/configs/book_clean.yaml b/flagdata/cleaner/configs/book_clean.yaml
new file mode 100644
index 0000000..1ffef98
--- /dev/null
+++ b/flagdata/cleaner/configs/book_clean.yaml
@@ -0,0 +1,20 @@
+basic:
+  # input directory containing the e-books to clean
+  input: input/book_demo_data
+  # save file path
+  output: output/book_demo_output.jsonl
+  # path to the JSON file with the keywords that guide the cleaning
+  config_path: book_config.json
+  # directory for temporary files generated while processing e-books
+  workspace: input/book_demo_temp
+  # Remove sentences that contain any of the following listed content
+  pattern: [ "@article" ]
+  cleaner_class: BookCleaner
+TextCleaner:
+  drop_docs_exceeding_newline_proportion:
+  drop_doc_below_ratio:
+  end_clip:
+  remove_specific_patterns:
+  remove_control_chars:
+  remove_extraspace:
+  remove_unwanted_lines:
\ No newline at end of file
diff --git a/flagdata/cleaner/configs/book_config.json b/flagdata/cleaner/configs/book_config.json
new file mode 100644
index 0000000..802ac46
--- /dev/null
+++ b/flagdata/cleaner/configs/book_config.json
@@ -0,0 +1,158 @@
+{
+  "KEYWORDS_SET": [
+    ["Title", 5, false, 0.2, 0.2],
+    ["Illustrator", 5, false, 0.2, 0.2],
+ ["Release Date", 5, false, 0.2, 0.2], + ["Produced by", 5, false, 0.2, 0.2], + ["Illustration", 5, false, 0.2, 0.2], + ["Published", 5, false, 0.2, 0.2], + ["CHAPTERS", 5, false, 0.2, 0.2], + ["ILLUSTRATIONS", 5, false, 0.2, 0.2], + ["INTRODUCTION", 5, false, 0.2, 0.2], + ["SISÄLLYS", 5, false, 0.2, 0.2], + ["CONTENTS", 5, false, 0.2, 0.2], + ["Preface", 5, false, 0.2, 0.2], + ["Dedication", 5, false, 0.2, 0.2], + ["致谢", 5, false, 0.2, 0.2], + ["自序", 5, false, 0.2, 0.2], + ["序言", 5, false, 0.2, 0.2], + ["著者序", 5, false, 0.2, 0.2], + ["引言", 5, false, 0.2, 0.2], + ["目次", 5, false, 0.2, 0.2], + ["目录", 5, false, 0.2, 0.2], + ["附录", 5, false, 0.2, 0.2], + + ["公众号", 20, false, 0.2, 0.2], + ["书单", 20, false, 0.2, 0.2], + ["微信号", 20, false, 0.2, 0.2], + ["读书分享", 20, false, 0.2, 0.2], + ["豆瓣", 20, false, 0.2, 0.2], + ["沉金书屋", 20, false, 0.2, 0.2], + ["电子书搜索", 20, false, 0.2, 0.2], + ["电子书下载", 20, false, 0.2, 0.2], + ["资源分享", 20, false, 0.1, 0.1], + ["电子书打包", 20, true, 0.2, 0.2], + ["电子书免费", 20, true, 0.2, 0.2], + ["满屋书香", 20, true, 0.2, 0.2], + ["古德猫宁李", 40, true, 0.2, 0.2], + ["读书简史", 20, true, 0.2, 0.2], + ["书单分享", 20, true, 0.2, 0.2], + ["资源分享", 20, true, 0.2, 0.2], + ["电子书打包", 40, true, 0.2, 0.2], + ["ePUBw", 40, true, 0.2, 0.2], + ["中华书局", 20, true, 0.2, 0.2], + ["First Page", 10, true, 0.2, 0.2], + ["Title Page", 10, true, 0.2, 0.2], + ["Table of Contents", 10, true, 0.2, 0.2], + ["Copyright Page", 10, true, 0.2, 0.2], + ["ACKNOWLEDGMENTS", 10, false, 0.2, 0.2], + ["LICENSE NOTES:", 20, true, 0.2, 0.2], + ["Published:", 20, true, 0.2, 0.2], + ["Publisher:", 20, true, 0.2, 0.2], + ["Language: English", 10, true, 0.2, 0.2], + ["Release Date:", 10, true, 0.2, 0.2], + ["Illustrator:", 10, true, 0.2, 0.2], + ["Commentator:", 10, true, 0.2, 0.2], + ["Author:", 10, true, 0.2, 0.2], + ["Title:", 10, true, 0.2, 0.2], + ["Words:", 10, true, 0.2, 0.2], + ["Packaged:", 10, true, 0.2, 0.2], + ["Warnings:", 10, true, 0.2, 0.2], + ["eBook", 20, false, 0.2, 0.2], + ["License Notes", 10, true, 0.2, 0.2], + ["Published by:", 10, true, 0.2, 0.2], + ["作者简介", 15, true, 0.2, 0.2], + ["copyright ©", 40, true, 0.2, 0.2], + + ["CIP", 40, false, 0.2, 0.2], + ["出版社", 15, true, 0.2, 0.2], + ["邮政编码", 15, true, 0.2, 0.2], + ["印张", 15, true, 0.2, 0.2], + ["印次", 15, true, 0.2, 0.2], + ["版次", 15, true, 0.2, 0.2], + ["版权", 15, true, 0.2, 0.2], + ["出版发行:", 15, true, 0.2, 0.2], + ["责任编辑", 15, true, 0.2, 0.2], + ["版权信息", 15, true, 0.2, 0.2], + ["Copyright", 10, false, 0.2, 0.2], + ["COPYRIGHT", 10, false, 0.2, 0.2], + + ["反侵权盗版声明", 20, true, 0.2, 0.2], + ["序 /", 20, true, 0.2, 0.2], + ["Contents", 15, false, 0.2, 0.2], + + ["前言:", 0, true, 0.2, 0.2], + ["作者序", 0, true, 0.2, 0.2], + ["目 录", 0, true, 0.2, 0.2], + ["作者简介", 0, true, 0.2, 0.2], + ["text copyright", 0, true, 0.2, 0.2], + ["序 /", 0, true, 0.2, 0.2], + ["致谢", 0, true, 0.2, 0.2], + ["致 谢", 0, true, 0.2, 0.2], + ["自序", 0, true, 0.2, 0.2], + ["自 序", 0, true, 0.2, 0.2], + ["序言", 0, true, 0.2, 0.2], + ["著者序", 0, true, 0.2, 0.2], + ["序 言", 0, true, 0.2, 0.2], + ["引言", 0, true, 0.2, 0.2], + ["引 言", 0, true, 0.2, 0.2], + ["目次", 0, true, 0.2, 0.2], + ["目 次", 0, true, 0.2, 0.2], + ["目录", 0, true, 0.2, 0.2], + ["附录", 0, true, 0.2, 0.2], + ["附 录", 0, true, 0.2, 0.2], + ["前 言", 0, true, 0.2, 0.2], + ["后记", 0, true, 0.2, 0.2], + ["后 记", 0, true, 0.2, 0.2], + ["引子", 0, true, 0.2, 0.2], + ["引 子", 0, true, 0.2, 0.2], + ["CHAPTER", 0, false, 0.2, 0.2], + + ["All rights reserved", 100, true, 0.2, 0.2], + ["First published", 100, true, 0.2, 0.2], + ["This ebook is copyright", 100, true, 
0.2, 0.2], + ["All rights reserved", 1000, false, 0.2, 0.2], + ["First published", 1000, false, 0.2, 0.2], + ["This ebook is copyright", 1000, false, 0.2, 0.2], + + ["微博", 80, true, 0.2, 0.2], + ["微信", 80, true, 0.2, 0.2], + ["朋友圈", 80, true, 0.2, 0.2], + ["ISBN", 80, true, 0.2, 0.2], + ["前言:", 80, true, 0.2, 0.2], + ["作者序", 80, true, 0.2, 0.2], + ["目 录", 80, true, 0.2, 0.2], + ["作者简介", 80, true, 0.2, 0.2], + ["text copyright", 80, true, 0.2, 0.2], + ["©", 200, true, 0.2, 0.2] + ] +, + + "KEYWORDS_BY_PARTS": [ + { + "keywords": ["鸣谢", "目录", "附录", "封面", "前言", "鸣 谢", "目 录", "附 录", "封 面", "前 言"], + "threshold": 5 + }, + { + "keywords": [ + "公众号", "书单", "微信号", "力荐", "读书分享", "豆瓣", "沉金书屋", + "电子书搜索", "电子书下载", "资源分享", "电子书打包", "电子书免费", + "满屋书香", "古德猫宁李", "读书简史", "书单分享", "资源分享", + "电子书打包", "ePUBw", "中华书局" + ], + "threshold": 30 + }, + { + "keywords": [ + "CIP", "出版社", "邮政编码", "印张", "定价", "印次", "版次", "ISBN", "版权", + "出版发行:", "责任编辑", "版权信息", "Copyright" + ], + "threshold": 30 + }, + { + "keywords": ["邮箱", "www.", ".com", ".cn", "电话"], + "threshold": 10 + } + ] + +} diff --git a/flagdata/cleaner/configs/html_clean.yaml b/flagdata/cleaner/configs/html_clean.yaml new file mode 100644 index 0000000..9a8b7d0 --- /dev/null +++ b/flagdata/cleaner/configs/html_clean.yaml @@ -0,0 +1,18 @@ +basic: + # input file path + input: demo_input.jsonl + # save file path + output: demo_output.jsonl + # we support input of jsonl/plain text, if it's text lines, set it to false + is_jsonl: true + # key in the input file corresponding to the value to extract/clean + source_key: rawContent + # key in the output file for saving + result_key: cleanedContent + cleaner_class: HTMLCleaner +HTMLCleaner: + remove_extraspace: + remove_fragments: + + + diff --git a/flagdata/cleaner/configs/qa_clean.yaml b/flagdata/cleaner/configs/qa_clean.yaml new file mode 100644 index 0000000..e69de29 diff --git a/flagdata/cleaner/configs/text_clean.yaml b/flagdata/cleaner/configs/text_clean.yaml new file mode 100644 index 0000000..7e9d05a --- /dev/null +++ b/flagdata/cleaner/configs/text_clean.yaml @@ -0,0 +1,20 @@ +basic: + # input file path + input: input/text_demo_input.jsonl + # save file path + output: output/text_demo_output.jsonl + # key in the input file corresponding to the value to extract/clean + source_key: text + # key in the output file for saving + result_key: clean_content + # Remove sentences that contain any of the following listed content + pattern: [ "@article" ] + cleaner_class: TextCleaner +TextCleaner: + drop_docs_exceeding_newline_proportion: + drop_doc_below_ratio: + end_clip: + remove_specific_patterns: + remove_control_chars: + remove_extraspace: + remove_unwanted_lines: \ No newline at end of file diff --git a/flagdata/cleaner/docs/Book_Cleaner.md b/flagdata/cleaner/docs/Book_Cleaner.md new file mode 100644 index 0000000..87e5a9d --- /dev/null +++ b/flagdata/cleaner/docs/Book_Cleaner.md @@ -0,0 +1,146 @@ +## Book_Cleaner +### Description + +`Book_Cleaner` is a module designed for processing e-books. Its main function is to extract text content from e-book files (formats like EPUB, MOBI, AZW) and remove unwanted parts such as the table of contents, advertisements and image annotations. + +### Environment Setup + +`Book_Cleaner` utilizes the `Calibre ` library ([[https://calibre-ebook.com/](https://calibre-ebook.com/)]) to parse e-books. 
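+
+The cleaner is assumed to call Calibre's command-line tools (notably `ebook-convert`) under the hood to turn EPUB/MOBI/AZW files into plain text. After running the installer shown below, a quick check like the following sketch confirms the tool is reachable from Python; it is an illustration only, not part of the module:
+
+```python
+import shutil
+import subprocess
+
+# `ebook-convert` ships with Calibre; the book cleaner is assumed to rely on it
+# when converting e-books to plain text.
+if shutil.which("ebook-convert") is None:
+    raise RuntimeError("Calibre's ebook-convert was not found on PATH")
+
+# Print the installed Calibre version as a smoke test.
+print(subprocess.run(["ebook-convert", "--version"],
+                     capture_output=True, text=True, check=True).stdout)
+```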
+ +You can install `Calibre ` with the following command: + + +`sudo -v && wget -nv -O- https://download.calibre-ebook.com/linux-installer.sh | sudo sh /dev/stdin` + +### Usage +`Book_Cleaner` is initialized with three parameters: + + +通过调用 `Book_Cleaner`的`clean`函数可以实现对电子书的处理,以下是一个简单的使用示例: + +1. `config_path`: Path to a JSON file containing a series of keywords that guide the text cleaning process. Users can edit this file to add custom keywords to suit specific cleaning needs. + +2. `workspace`: A directory path used to store temporary files generated during the e-book processing. + + (**Note**: When `Book_Cleaner` is initialized, it will create a folder named `book_temp_dir1` in `workspace`. Each time the clean function is called, all files inside `book_temp_dir1` will be deleted. If a folder named `book_temp_dir1` already exists at initialization, it will attempt to create `book_temp_dir2`, and so forth.) + +3. `target_path`: Path to a JSON file where the processed content will be saved. + +The `clean` function of `Book_Cleaner` can be used to process e-books. Here is a simple usage example: + + +```python +# Configuration file path +config_path = 'path/to/config.json' +# Temporary file path +workspace = 'path/to/workspace' +# Target path +target_path = 'path/to/target' + +# Initialize +cleaner = Book_Cleaner(config_path, workspace, target_path) + +# Perform cleaning +cleaner.process_books() +``` + +### Demonstration +#### 1. Remove cover, copyright page +*** +>~~*First published in 2015*~~ +> +>~~*Allen & Unwin*~~ +> +>~~*83 Alexander Street*~~ +> +>~~*Crows Nest NSW 2065*~~ +> +>~~*Australia*~~ +> +>~~*Phone: (61 2) 8425 0100*~~ +> +>~~*Cataloguing-in-Publication details are available from the National Library of Australia*~~ +> +>~~*ISBN 978 1 74331 9208*~~ +> +>~~*eISBN 978 1 74343 637 0*~~ +> +>~~*Internal design by Christabella Designs*~~ +> +>~~*Typeset by Post Pre-press Group, Australia*~~ +> +>~~*Extracts from The Kite Runner by Khaled Hosseini © Bloomsbury Publishing UK*~~ +> +> … +> +>*Chapter 1* +> +>*She was standing at the end of the hall in the late afternoon light, her back arched against the wall, between two landscape paintings. Closing her eyes, she ran her hands up and down the red dress clinging to her curves. The dress had been designed by a genius. Its neckline was low enough to be provocative, but the hemline was below knee-length—too long for Leo, the husband of our hostess, to …* +*** +#### 2. Removing Directories +*** +>~~*Contents*~~ +> +>~~*Cover*~~ +> +>~~*About the Book*~~ +> +>~~*About the Author*~~ +> +>~~*Title Page*~~ +> +>~~*Dedication*~~ +> +>~~*Prologue*~~ +> +>~~*Chapter One Innocent Days*~~ +> +>~~*Chapter Two Culture Shock*~~ +> +>~~*Chapter Three Daddy*~~ +> +>… +> +>*About the Book* +> +>*What do they find attractive about me? An underage girl who just lies there sobbing, looking up at them... as they come to me one by one.* +>*This is the shocking true story of how a young girl from Rochdale came to be Girl A – the key witness in the trial of Britain’s most notorious child sex ring. …* +*** +#### 3. Remove image annotations +*** +>*No disease has ever been so instantly recognized or so widely known and feared. …* +> +>~~*Figure 1. Smallpox Deities. Sopona (left) was the smallpox god among the Yorubas of western Africa. Sitala Mata (right), the Hindu goddess of smallpox, shown astride a donkey, was widely worshipped in temples throughout the Indian countryside.*~~ +> +>*Dr. Nick Ward, one of my senior staff, accompanied me on the ward rounds. 
A veteran of medical service in Africa, he had cared for patients with the worst of tropical diseases. As we left the hospital, he placed his hands on the railing of a balcony, leaned over as he looked at the ground and said, “I don’t think I can ever again walk through a ward like that. It is unimaginable.”* +*** + +#### 4. Remove advertisements +*** +>~~*Medieval Series*~~ +> +>~~*Legendary Bastards of the Crown Series*~~ +> +>~~*Legendary Bastards of the Crown Series*~~ +> +>~~*Seasons of Fortitude Series*~~ +> +>~~*Legacy of the Blade Series*~~ +> +>*…* +> +>~~*And More!*~~ +> +>~~*Please visit http:// xxx.com !*~~ +> +>*…* +> +>*A small hum of excitement rose inside her. She’d been traveling since the morning, and this trip was twice the distance of the longest journey she’d ever taken alone. But she’d done it. Exhausted but quietly exhilarated, she turned the radio up loud. This was a time for celebration. She clenched her hand in the air in a solitary fist-bump.* +>*A wooden sign on the left-hand side of the road read: BLEATH Population 3,667* +> +>~~*Good book recommendation: http:// xxx.com !*~~ +> +>*A small town was going to be a culture shock after New York City—the only place she’d ever known. She’d lived in a three-bedroom apartment with her parents and brother since she was three. And now she was heading off by herself for an entire month, to conduct research for her final year thesis at college.* +*** + + diff --git a/flagdata/cleaner/docs/Book_Cleaner_ZH.md b/flagdata/cleaner/docs/Book_Cleaner_ZH.md new file mode 100644 index 0000000..49823a3 --- /dev/null +++ b/flagdata/cleaner/docs/Book_Cleaner_ZH.md @@ -0,0 +1,141 @@ +## Book_Cleaner +### 描述 + +`Book_Cleaner` 是一个用于处理电子书的模块,主要功能是从电子书文件(EPUB, MOBI, AZW)中提取文本内容,并且删除一些不需要的部分(目录,广告,图片标注等 )。 + +### 环境配置 + +`Book_Cleaner`使用`Calibre `库 ([[https://calibre-ebook.com/](https://calibre-ebook.com/)]) 对电子书进行解析。 + +`Calibre `安装: + +`sudo -v && wget -nv -O- https://download.calibre-ebook.com/linux-installer.sh | sudo sh /dev/stdin` + +### 用法 +`Book_Cleaner` 的初始化需要以下三个参数: + +1. `config_path`: JSON文件路径,该文件包含了一系列的关键词信息。这些关键词用于指导文本的清洗过程。用户可以编辑这个文件,添加自定义的关键词以适应特定的清洗需求。 + +2. `workspace`: 一个文件夹路径,用于存储处理电子书过程中生成的临时文件。 + + (**注意**: `Book_Cleaner`初始化时会在`workspace`中建立一个名为`book_temp_dir1`的文件夹,每次调用`clean`函数时会删除`book_temp_dir1`里面的所有文件, 若初始化之前已经有名为`book_temp_dir1`的文件夹,则会尝试创建`book_temp_dir2`, 依此类推。) + +3. `target_path`: JSON文件路径,处理后内容将被保存在`target_path`中。 + +通过调用 `Book_Cleaner`的`clean`函数可以实现对电子书的处理,以下是一个简单的使用示例: + +```python +# 配置文件路径 +config_path = 'path/to/config.json' +# 临时文件路径 +workspace = 'path/to/workspace' +# 目标路径 +target_path = 'path/to/target' + +# 初始化 +cleaner = Book_Cleaner(config_path, workspace, target_path) + +# 执行清洗处理 +cleaner.process_books() +``` + +### 效果展示 +#### 1. 去除封皮,版权页 +*** +>~~*First published in 2015*~~ +> +>~~*Allen & Unwin*~~ +> +>~~*83 Alexander Street*~~ +> +>~~*Crows Nest NSW 2065*~~ +> +>~~*Australia*~~ +> +>~~*Phone: (61 2) 8425 0100*~~ +> +>~~*Cataloguing-in-Publication details are available from the National Library of Australia*~~ +> +>~~*ISBN 978 1 74331 9208*~~ +> +>~~*eISBN 978 1 74343 637 0*~~ +> +>~~*Internal design by Christabella Designs*~~ +> +>~~*Typeset by Post Pre-press Group, Australia*~~ +> +>~~*Extracts from The Kite Runner by Khaled Hosseini © Bloomsbury Publishing UK*~~ +> +> … +> +>*Chapter 1* +> +>*She was standing at the end of the hall in the late afternoon light, her back arched against the wall, between two landscape paintings. 
Closing her eyes, she ran her hands up and down the red dress clinging to her curves. The dress had been designed by a genius. Its neckline was low enough to be provocative, but the hemline was below knee-length—too long for Leo, the husband of our hostess, to …* +*** +#### 2. 去除目录 +*** +>~~*Contents*~~ +> +>~~*Cover*~~ +> +>~~*About the Book*~~ +> +>~~*About the Author*~~ +> +>~~*Title Page*~~ +> +>~~*Dedication*~~ +> +>~~*Prologue*~~ +> +>~~*Chapter One Innocent Days*~~ +> +>~~*Chapter Two Culture Shock*~~ +> +>~~*Chapter Three Daddy*~~ +> +>… +> +>*About the Book* +> +>*What do they find attractive about me? An underage girl who just lies there sobbing, looking up at them... as they come to me one by one.* +>*This is the shocking true story of how a young girl from Rochdale came to be Girl A – the key witness in the trial of Britain’s most notorious child sex ring. …* +*** +#### 3. 去除图片注释 +*** +>*No disease has ever been so instantly recognized or so widely known and feared. …* +> +>~~*Figure 1. Smallpox Deities. Sopona (left) was the smallpox god among the Yorubas of western Africa. Sitala Mata (right), the Hindu goddess of smallpox, shown astride a donkey, was widely worshipped in temples throughout the Indian countryside.*~~ +> +>*Dr. Nick Ward, one of my senior staff, accompanied me on the ward rounds. A veteran of medical service in Africa, he had cared for patients with the worst of tropical diseases. As we left the hospital, he placed his hands on the railing of a balcony, leaned over as he looked at the ground and said, “I don’t think I can ever again walk through a ward like that. It is unimaginable.”* +*** + +#### 4. 去除广告 +*** +>~~*Medieval Series*~~ +> +>~~*Legendary Bastards of the Crown Series*~~ +> +>~~*Legendary Bastards of the Crown Series*~~ +> +>~~*Seasons of Fortitude Series*~~ +> +>~~*Legacy of the Blade Series*~~ +> +>*…* +> +>~~*And More!*~~ +> +>~~*Please visit http:// xxx.com !*~~ +> +>*…* +> +>*A small hum of excitement rose inside her. She’d been traveling since the morning, and this trip was twice the distance of the longest journey she’d ever taken alone. But she’d done it. Exhausted but quietly exhilarated, she turned the radio up loud. This was a time for celebration. She clenched her hand in the air in a solitary fist-bump.* +>*A wooden sign on the left-hand side of the road read: BLEATH Population 3,667* +> +>~~*Good book recommendation: http:// xxx.com !*~~ +> +>*A small town was going to be a culture shock after New York City—the only place she’d ever known. She’d lived in a three-bedroom apartment with her parents and brother since she was three. And now she was heading off by herself for an entire month, to conduct research for her final year thesis at college.* +*** + + diff --git a/flagdata/cleaner/docs/Qa_Cleaner.md b/flagdata/cleaner/docs/Qa_Cleaner.md new file mode 100644 index 0000000..2ee3512 --- /dev/null +++ b/flagdata/cleaner/docs/Qa_Cleaner.md @@ -0,0 +1,43 @@ +- **QACleaner: Filtering out low-quality data by training a classifier using pre-labeled text quality data.** + - Input: + - Labeled Data + ```json lines + //{"text": #input text, "label": 0/1, 0 for high quality, 1 indicating low quality} + //低质量:赌博/诈骗/低俗色情//营销内容 + {"text": "“酒店招女/男公关”小广告的目的是什么? 
帮我改了标题之后似乎把我的原意给模糊了,我是想问:小广告重点是想“招聘女职员”,还是想吸引男人注意去消费。 本人带队直招:(不收取任何形式的押金、进场费、管理费、介绍费等等)。我们客源多而稳定生意红火,绝对不会出现几天上不到班的情况,挣钱不是梦。每个做夜场的女孩都不想东奔西走的找场子,就算日新俩三千,三天打鱼两天晒网,到头来也赚不到什么钱。都想找一个能稳定的可以挣到钱的场子和一个好的领队。如果你们也是这样想的,那么就请相信我,我这里就是你们不二的选择,如果你犹豫了错过了,那就只能怪你们自己", "label": "0"} + {"text": "如何通俗易懂地解释遗传算法?有什么例子? 大三软件工程学生,以前只听过遗传算法这个名字,但是真正是怎么一回事没有了解过。今天刚好看到 的回答,想起这学期正好选了人工智能这堂课,就觉得想试着码一下这个算法,算是提前预习一下。于是花了两个小时把这个算法大概搞懂了,把思路写一遍,也算是自己再熟悉一遍(如果哪里搞错了请大神们轻喷 理解这个算法首先要理解一些术语。下图(来自 Genetic Algorithms Fundamentals )把术语之间的关系表示的很清楚。 遗传算法就是通过不断地进化,将种群里面我们最想要的染色体保留下来。进化多次之后,种群里的大部分染色体都会是比较优势的染色体(我们想要的解),所以我们可以通过这个算法获取多个较优解。 ps: 关于基因和等位基因的区别:基因(gene)是指染色体上的特定位置,而等位基因(allele)则是当前染色体在该基因处的值。 知道一些术语之间的关系之后,可以试着尝试搞懂算法了。拿@sjyan刚刚这道题做例子。 求解函数 f(x) = x + 10*sin(5*x) + 7*cos(4*x) 在区间[0,9]的最大值。 用遗传算法解这道题的过程, 他说得很清楚,主要是三个阶段: 初始化阶段 确定染色体的形式。先选择一种方式对x进行编码,使其从实际的解空间(phenotype space)被映射到编码空间(genotype space),也就是把实数x变成一条染色体。在这道题中我沿用了@sjyan的编码方式,即把解空间划分为 2^{17}-1 等份,然后通过一个17个bit的染色体来表达解空间的实数值。 确定好染色体形式之后,我们便可以拿它生成一个初始的种群。 进化迭代阶段 接下来会进行不停地进化迭代,每次迭代主要由三个阶段组成:选择、交叉、变异。 选择阶段。选择阶段经历了适应性选择和随机选择。在适应性选择中,我们通过适应性函数(fitness function)对种群中的每一条染色体进行适应性评估,按评估结果对染色体进行排序。筛选出适应性最好的一定数量(可以通过参数调节)的染色体,作为下一代的父母加入存货列表。而在随机选择中,我们会随机挑选一些没有通过适应性选择的个体也加入存活列表,这样做是为了使得一些拥有潜在价值基因但适应性很差的个体得以生存下来。 交叉阶段。每一代染色体的数量是一定的,我们淘汰了一部分染色体,就要生成新的染色体来补足空缺。从上一代中,我们保留了一部分存活的染色体,它们之间将会进行交叉。交叉是指随机从存活列表中抽取两个染色体,将这两条染色体进行融合从而生成新的染色体(就是取一部分父染色体的基因,再在母染色体取在父染色体没有取到的基因,把这些基因合成一条新的染色体),把新的染色体加入种群中。交叉操作会一直持续,直到种群数量跟之前的种群数量相同。 变异阶段。对于种群中的每一条染色体,使其一定几率地发生随机变异(在这个例子下就是反转染色体上某一个bit的值)。 验收阶段 经过很多代的进化之后,种群里面的染色体基本上符合最优化的要求了。这时就可以去对里面的染色体进行解码(decode),将其转化为实际的解。 python实现 代码写的挺渣的,不过标了很多注释。 #encoding=utf-8import mathimport randomimport operatorclass GA(): def __init__(self, length, count): # 染色体长度 self.length = length # 种群中的染色体数量 self.count = count # 随机生成初始种群 self.population = self.gen_population(length, count) def evolve(self, retain_rate=0.2, random_select_rate=0.5, mutation_rate=0.01): \"\"\" 进化 对当前一代种群依次进行选择、交叉并生成新一代种群,然后对新一代种群进行变异 \"\"\" parents = self.selection(retain_rate, random_select_rate) self.crossover(parents) self.mutation(mutation_rate) def gen_chromosome(self, length): \"\"\" 随机生成长度为length的染色体,每个基因的取值是0或1 这里用一个bit表示一个基因 \"\"\" chromosome = 0 for i in xrange(length): chromosome |= (1 << i) * random.randint(0, 1) return chromosome def gen_population(self, length, count): \"\"\" 获取初始种群(一个含有count个长度为length的染色体的列表) \"\"\" return [self.gen_chromosome(length) for i in xrange(count)] def fitness(self, chromosome): \"\"\" 计算适应度,将染色体解码为0~9之间数字,代入函数计算 因为是求最大值,所以数值越大,适应度越高 \"\"\" x = self.decode(chromosome) return x + 10*math.sin(5*x) + 7*math.cos(4*x) def selection(self, retain_rate, random_select_rate): \"\"\" 选择 先对适应度从大到小排序,选出存活的染色体 再进行随机选择,选出适应度虽然小,但是幸存下来的个体 \"\"\" # 对适应度从大到小进行排序 graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] # 选出适应性强的染色体 retain_length = int(len(graded) * retain_rate) parents = graded[:retain_length] # 选出适应性不强,但是幸存的染色体 for chromosome in graded[retain_length:]: if random.random() < random_select_rate: parents.append(chromosome) return parents def crossover(self, parents): \"\"\" 染色体的交叉、繁殖,生成新一代的种群 \"\"\" # 新出生的孩子,最终会被加入存活下来的父母之中,形成新一代的种群。 children = [] # 需要繁殖的孩子的量 target_count = len(self.population) - len(parents) # 开始根据需要的量进行繁殖 while len(children) < target_count: male = random.randint(0, len(parents)-1) female = random.randint(0, len(parents)-1) if male != female: # 随机选取交叉点 cross_pos = random.randint(0, self.length) # 生成掩码,方便位操作 mask = 0 for i in xrange(cross_pos): mask |= (1 << i) male 
= parents[male] female = parents[female] # 孩子将获得父亲在交叉点前的基因和母亲在交叉点后(包括交叉点)的基因 child = ((male & mask) | (female & ~mask)) & ((1 << self.length) - 1) children.append(child) # 经过繁殖后,孩子和父母的数量与原始种群数量相等,在这里可以更新种群。 self.population = parents + children def mutation(self, rate): \"\"\" 变异 对种群中的所有个体,随机改变某个个体中的某个基因 \"\"\" for i in xrange(len(self.population)): if random.random() < rate: j = random.randint(0, self.length-1) self.population[i] ^= 1 << j def decode(self, chromosome): \"\"\" 解码染色体,将二进制转化为属于[0, 9]的实数 \"\"\" return chromosome * 9.0 / (2**self.length-1) def result(self): \"\"\" 获得当前代的最优值,这里取的是函数取最大值时x的值。 \"\"\" graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] return ga.decode(graded[0]) if __name__ == '__main__': # 染色体长度为17, 种群数量为300 ga = GA(17, 300) # 200次进化迭代 for x in xrange(200): ga.evolve() print ga.result() 简陋的)运行结果,很接近 总结 遗传算法可以产出一组相对较优的解,而且不需要根据具体问题去进行过多的逻辑推演,速度也相对较快。缺点就是不能保证解是最优的。", "label": "1"} + {"text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)? 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。 覆盖率高,当然好,但工程类的东西往往需要妥协和平衡,时间不够时,先测什么,后测什么,就更重要一些了。", "label": "1"} + //低质量:信息量低//口语化内容 + {"text": "有车是怎样的体验? 生活质量有提高。也有降低。 可以想去哪去哪,不开心就转一圈。 降低也有很多。刚毕业不久,工作原因买了个商务车。油耗保养什么的挤占你的工资。还有就是,朋友之间的,一个电话,你可能就要跑很久接送人,比较不开心。 最特么重要的是,当你看见莫名其妙的罚款单的时候。 索性没人认识。不打码了。答案千万别火啊。", "label": "0"} + //…… + ``` + + - Data to be cleaned + ```json lines + //{"text": #input text, "label": 0/1, 0 for high quality, 1 indicating low quality} + //低质量:赌博/诈骗/低俗色情//营销内容 + {"text": "“酒店招女/男公关”小广告的目的是什么? 帮我改了标题之后似乎把我的原意给模糊了,我是想问:小广告重点是想“招聘女职员”,还是想吸引男人注意去消费。 本人带队直招:(不收取任何形式的押金、进场费、管理费、介绍费等等)。我们客源多而稳定生意红火,绝对不会出现几天上不到班的情况,挣钱不是梦。每个做夜场的女孩都不想东奔西走的找场子,就算日新俩三千,三天打鱼两天晒网,到头来也赚不到什么钱。都想找一个能稳定的可以挣到钱的场子和一个好的领队。如果你们也是这样想的,那么就请相信我,我这里就是你们不二的选择,如果你犹豫了错过了,那就只能怪你们自己", "label": "0"} + {"text": "如何通俗易懂地解释遗传算法?有什么例子? 
大三软件工程学生,以前只听过遗传算法这个名字,但是真正是怎么一回事没有了解过。今天刚好看到 的回答,想起这学期正好选了人工智能这堂课,就觉得想试着码一下这个算法,算是提前预习一下。于是花了两个小时把这个算法大概搞懂了,把思路写一遍,也算是自己再熟悉一遍(如果哪里搞错了请大神们轻喷 理解这个算法首先要理解一些术语。下图(来自 Genetic Algorithms Fundamentals )把术语之间的关系表示的很清楚。 遗传算法就是通过不断地进化,将种群里面我们最想要的染色体保留下来。进化多次之后,种群里的大部分染色体都会是比较优势的染色体(我们想要的解),所以我们可以通过这个算法获取多个较优解。 ps: 关于基因和等位基因的区别:基因(gene)是指染色体上的特定位置,而等位基因(allele)则是当前染色体在该基因处的值。 知道一些术语之间的关系之后,可以试着尝试搞懂算法了。拿@sjyan刚刚这道题做例子。 求解函数 f(x) = x + 10*sin(5*x) + 7*cos(4*x) 在区间[0,9]的最大值。 用遗传算法解这道题的过程, 他说得很清楚,主要是三个阶段: 初始化阶段 确定染色体的形式。先选择一种方式对x进行编码,使其从实际的解空间(phenotype space)被映射到编码空间(genotype space),也就是把实数x变成一条染色体。在这道题中我沿用了@sjyan的编码方式,即把解空间划分为 2^{17}-1 等份,然后通过一个17个bit的染色体来表达解空间的实数值。 确定好染色体形式之后,我们便可以拿它生成一个初始的种群。 进化迭代阶段 接下来会进行不停地进化迭代,每次迭代主要由三个阶段组成:选择、交叉、变异。 选择阶段。选择阶段经历了适应性选择和随机选择。在适应性选择中,我们通过适应性函数(fitness function)对种群中的每一条染色体进行适应性评估,按评估结果对染色体进行排序。筛选出适应性最好的一定数量(可以通过参数调节)的染色体,作为下一代的父母加入存货列表。而在随机选择中,我们会随机挑选一些没有通过适应性选择的个体也加入存活列表,这样做是为了使得一些拥有潜在价值基因但适应性很差的个体得以生存下来。 交叉阶段。每一代染色体的数量是一定的,我们淘汰了一部分染色体,就要生成新的染色体来补足空缺。从上一代中,我们保留了一部分存活的染色体,它们之间将会进行交叉。交叉是指随机从存活列表中抽取两个染色体,将这两条染色体进行融合从而生成新的染色体(就是取一部分父染色体的基因,再在母染色体取在父染色体没有取到的基因,把这些基因合成一条新的染色体),把新的染色体加入种群中。交叉操作会一直持续,直到种群数量跟之前的种群数量相同。 变异阶段。对于种群中的每一条染色体,使其一定几率地发生随机变异(在这个例子下就是反转染色体上某一个bit的值)。 验收阶段 经过很多代的进化之后,种群里面的染色体基本上符合最优化的要求了。这时就可以去对里面的染色体进行解码(decode),将其转化为实际的解。 python实现 代码写的挺渣的,不过标了很多注释。 #encoding=utf-8import mathimport randomimport operatorclass GA(): def __init__(self, length, count): # 染色体长度 self.length = length # 种群中的染色体数量 self.count = count # 随机生成初始种群 self.population = self.gen_population(length, count) def evolve(self, retain_rate=0.2, random_select_rate=0.5, mutation_rate=0.01): \"\"\" 进化 对当前一代种群依次进行选择、交叉并生成新一代种群,然后对新一代种群进行变异 \"\"\" parents = self.selection(retain_rate, random_select_rate) self.crossover(parents) self.mutation(mutation_rate) def gen_chromosome(self, length): \"\"\" 随机生成长度为length的染色体,每个基因的取值是0或1 这里用一个bit表示一个基因 \"\"\" chromosome = 0 for i in xrange(length): chromosome |= (1 << i) * random.randint(0, 1) return chromosome def gen_population(self, length, count): \"\"\" 获取初始种群(一个含有count个长度为length的染色体的列表) \"\"\" return [self.gen_chromosome(length) for i in xrange(count)] def fitness(self, chromosome): \"\"\" 计算适应度,将染色体解码为0~9之间数字,代入函数计算 因为是求最大值,所以数值越大,适应度越高 \"\"\" x = self.decode(chromosome) return x + 10*math.sin(5*x) + 7*math.cos(4*x) def selection(self, retain_rate, random_select_rate): \"\"\" 选择 先对适应度从大到小排序,选出存活的染色体 再进行随机选择,选出适应度虽然小,但是幸存下来的个体 \"\"\" # 对适应度从大到小进行排序 graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] # 选出适应性强的染色体 retain_length = int(len(graded) * retain_rate) parents = graded[:retain_length] # 选出适应性不强,但是幸存的染色体 for chromosome in graded[retain_length:]: if random.random() < random_select_rate: parents.append(chromosome) return parents def crossover(self, parents): \"\"\" 染色体的交叉、繁殖,生成新一代的种群 \"\"\" # 新出生的孩子,最终会被加入存活下来的父母之中,形成新一代的种群。 children = [] # 需要繁殖的孩子的量 target_count = len(self.population) - len(parents) # 开始根据需要的量进行繁殖 while len(children) < target_count: male = random.randint(0, len(parents)-1) female = random.randint(0, len(parents)-1) if male != female: # 随机选取交叉点 cross_pos = random.randint(0, self.length) # 生成掩码,方便位操作 mask = 0 for i in xrange(cross_pos): mask |= (1 << i) male = parents[male] female = parents[female] # 孩子将获得父亲在交叉点前的基因和母亲在交叉点后(包括交叉点)的基因 child = ((male & mask) | (female & ~mask)) & ((1 << self.length) - 1) children.append(child) # 经过繁殖后,孩子和父母的数量与原始种群数量相等,在这里可以更新种群。 self.population = parents + children def mutation(self, rate): \"\"\" 变异 
对种群中的所有个体,随机改变某个个体中的某个基因 \"\"\" for i in xrange(len(self.population)): if random.random() < rate: j = random.randint(0, self.length-1) self.population[i] ^= 1 << j def decode(self, chromosome): \"\"\" 解码染色体,将二进制转化为属于[0, 9]的实数 \"\"\" return chromosome * 9.0 / (2**self.length-1) def result(self): \"\"\" 获得当前代的最优值,这里取的是函数取最大值时x的值。 \"\"\" graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] return ga.decode(graded[0]) if __name__ == '__main__': # 染色体长度为17, 种群数量为300 ga = GA(17, 300) # 200次进化迭代 for x in xrange(200): ga.evolve() print ga.result() 简陋的)运行结果,很接近 总结 遗传算法可以产出一组相对较优的解,而且不需要根据具体问题去进行过多的逻辑推演,速度也相对较快。缺点就是不能保证解是最优的。", "label": "1"} + {"text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)? 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。 覆盖率高,当然好,但工程类的东西往往需要妥协和平衡,时间不够时,先测什么,后测什么,就更重要一些了。", "label": "1"} + //低质量:信息量低//口语化内容 + {"text": "有车是怎样的体验? 生活质量有提高。也有降低。 可以想去哪去哪,不开心就转一圈。 降低也有很多。刚毕业不久,工作原因买了个商务车。油耗保养什么的挤占你的工资。还有就是,朋友之间的,一个电话,你可能就要跑很久接送人,比较不开心。 最特么重要的是,当你看见莫名其妙的罚款单的时候。 索性没人认识。不打码了。答案千万别火啊。", "label": "0"} + //…… + ``` + - Output: + - Cleaned Data + ```json lines + // 'text': #input text , 'prob': probability of being low quality + {"id": 3330999, "text": "哪些毕业论文的「致谢」部分让你印象深刻? 致谢 时近仲夏,岁在乙未,梧桐树下,清风明月,闲庭信步,回思往事,处处感怀。余十有八入长大,今已七年有余,入校所植之树,今已蔚然成荫矣,叹七载寒窗仅须臾耳,余多可喜多有有憾。 喜者有二,曰路漫漫而有爱,曰上下求索而有得。 有爱者,曰养育恩,曰师生情,曰鹣鲽意,曰同窗谊。 吾跪而叩谢者一曰养育恩。吾本草芥寒门,得有今日之成,皆赖椿萱。家慈教子以严,温良恭俭,家严襟怀洒落,博学多才,尤擅属文。暌隔庭闱,瞬已数载,椿萱衰迈,松菊萧条,每念及兹,余心怆然,唯朝乾夕惕,以报得万一。 吾跪而叩谢者二曰师生情。初,余师从周仁老师,吾才薄识短,囿于章句之学,不谙处世之道,恩师言传身教,耳提面命。晨夕讲授,通宵达旦,不达则不休,必通彻乃已。吾师徒二人尝因公三昼夜不眠不休于兰,盖因推敲二字。余性散漫,恩师尝教之曰:“天下事,果能坚韧不懈,总有可志竟成。志之所向,金石为开,谁能御之?”初吾常思应酬周到,恩师常诲之曰:“大抵任事之人,断不能有毁而无誉,有恩而无怨,不求人人应酬,事事周到,但求问心无愧,知恩图报。”陶冶变化,余性亦有所改。恩师用心之深,用力之勤,无人可及,愿承欢膝下,竭吾之能,以孝恩师,以寸草之心报三春之晖。 吾师鲁师,实乃真学者也,学问精深,品格方正。吾毕业论文开题之际,恰鲁师赴美深造,然其心系学生,吾之论文承蒙鲁师反复开说,屡次跨洋与吾讲说至数刻之久,先生以苦口滴杜鹃之血,增删五次,六易吾稿。余初做学问,困心衡虑,不得其法,胸有千言,或笔下无字,或洋洋洒洒,不得要领,先生言之切切,微言大义,专从学理上用力,不厌其烦,听之,昭然若发蒙也。余之理法才气俱进,胸中之意,今可达腕下。余本才短又性浮嚣,好用小智小术,鲁师常诲之曰:“天下之至拙,能胜天下之至巧。读书立志,须以困勉之功,锱铢积累,则终可达,浮光掠影,终不济事。读书困时切莫放弃,熬过此关,便可少进,日日精进,自有通达之时。”又曰:“自古成名者,多由笨干,天下事,最怕认真二字,凡事认真,终得其报。”三度春秋,鱼渔双授,先生于此,一生永铭。 三曰鹣鲽意。吾爱志伟,伴我寒窗,容余任性,许吾安稳,乃吾铠甲亦吾软肋,路遥遥,愿与君偕行。 四曰同窗谊。竹马之交霍然,共战人生数役,同享生活之乐,互为莫逆,情同手足;友迪,与交七载,相与分忧,互为解难,受益匪浅;友村,居住同室,切磋学艺,同窗情深;友恒大,胸怀锦绣,虽为新交亦引为知己;师兄张宏东、任玉龙、师姐海兰于吾亦有垂教之恩。得友如斯,幸甚至哉。 恩长笔短,吾之述不足以表其万一,寥寥数笔,仅余区区之意也。 有得者,曰知研究之真味,曰体求知之乐趣。书山浩浩,学海淼淼,今有寸得,如沧海一粟,余者吾将毕生求索。 憾者有二, 曰制之无节,曰行之无恒。此二者吾之所短也,余亦深以此自愧。时忆求学事,本因用功之时,常常玩偈,致百不遂志,精神散漫,愧悔憧扰,不能摆脱。人生之大憾非不能也,盖本可以也。语不云乎:往者不可谏,来者犹可追。自今以始,吾必不得自逸,使恒心常在。 伤离别,具表文,余心惶惶然。愿师长安康,同窗如意,再拜稽首。 乙未仲夏于博士楼","meta": {"Prob":0.04515477493841735}} + {"id": 15134791, "text": "都说俄语难,请问俄语究竟难在什么地方? 
字母发音这些请勿赘述,因为我觉得凡是打算学一门语言的人总会对这些有心理准备。我想了解俄语语法和词汇方面的难点。还有词源什么的。如果可以的话请对照着中文和英文这两种一般人有了解的语言来说明。 没想到这么多赞同,过来继续补充一些 ======================================= 俄语小本来凑热闹,学得不精,只是少有看到和自己语言有关的忍不住。 如果初学的话,最先接触的肯定是 1 名词性数格 这些在英文中是不存在的,但是很多印欧语系的语言中都有,所以并不特殊,只能说英语极度简化了。名词有3个性,单复数,6个格。 2 动词变位 动词有六个变位,按我们老师的话说,俄语的动词原形其实记了没啥用的,基本用不着。这话夸张了,但可以想见,大多数时候看到的俄语动词都是变位形式,有的特殊变位甚至让你无法推断其原形,而且你在说每句话的时候动词不能够脱口而出,要先在脑子里根据主语及时态给它变位,所以初期很容易说得磕磕巴巴的。其实简单地说动词变位是不准确的,很多人只看到动词的六个变位,事实上这个只要背下来常运用总会熟练的,动词的复杂程度远远不止于此。俄语的动词十分发达,最好的体现在动词前缀上。前缀类型有几十种。同样的词源,加不同的前缀意思就完全不同,甚至南辕北辙,而同样一个动词本身的意思也极其复杂,我总觉得比英语复杂,一个动词有本义引申义经常不少于10种,太常见了,总是容易弄混。 3 上述都算是语法吧,包括 形动词、副动词、无人称句 等等,都是其他语言不存在的语言现象(经知友 提醒西班牙语有副动词,更正),需要一阵的理解钻研,但语法这种东西,一旦理解了就可以举一反三,其实并不是高不可攀的。( 这里补充一点无人称句,为什么列出来,因为我们最熟悉的外语——英语中不存在无人称句。英语有It is raining. 这种句子,中文翻译或者隐含意是“天气”,天下雨了,英文中也是有个形式主语it的,即使没有真正主语,英文中一定有个形式主语,可俄语的无人称句连形式主语都没有,就是没有主语。比如下雨了,“下”这个词会是动词,“雨”变成宾语,没有任何主语。 初学者肯定容易不好理解,但都是可以攻克的难关,像前面说的,一旦懂了就是懂了。) 4 还有许多“ 习惯 ”问题。语言这东西很玄妙。总的说来就是“我们习惯这么说而不那么说”。比如用词。词义辨析是很大一块冰山。同样意思的词有五六个,他们可以是语体的区别(公文语体、官方语、口语等),可以使表达情绪的强烈不同,可以是语境不同、可以是不同的内涵和外延。在这里举个例加强理解: 俄语中常用的“有助于”有三个 (只是常用),这三个要这么区分: 一个是正式语体中用,一个是你全权帮助协助(比如你帮助妹妹学习),一个是你的帮助是一部分因素,比如好的天气有助于运动会的开展。以上不可以混用。这么一看英语的help是不是太可爱了! (这个例子是才接触过所以有印象,其实不算什么,远远不能体现俄语的变态……)总之区别或大或小,你都要记,用错了词,即使意思一样,听起来也很奇怪,会让俄罗斯人不理解。 这个话题说起来太大了,包括文化问题映射在语言上等,先写这么多,有空再来更。 =================================== 应 来补充 感谢各位赞同~ 5 俄罗斯的语气词也很让人头疼。当然如果只是想粗略地学,仅仅和人交流的层面,语气词学不通无伤大雅,大不了不用么!只不过你不会用语气词,你跟人讲话、发邮件就好像是新闻联播一样,这些“小词儿”,数量多,但最关键的是一个词有很多种语气……像же、уж之类,不同语境下意思迥异,尤其在读文学作品,在揣摩和你对话的人的言下之意(比如我只考了80分和我考了80分呢 前者你要安慰后者你要恭喜 搞错了可咋整)上用处极大不可大意。 6 还能想到的俄语比中文发达的一个类别是 情绪、表情 。很多词甚至没有办法找到相应的中文对应。而且中文有一点很好,就是它的“ 上位词 ”很发达。比如说 小说统称为小说,讲起来很方便,不管你写的哪类,总归是小说,我不会说错。但俄语中没有“小说”这个词,只有长篇小说,中篇小说,短篇小说,科幻小说,爱情小说等等……说之前必须要了解才能张口。否则就会犯“前两天看了短篇小说《安娜卡列尼娜》”这种错误,一定要背清楚啊不然会被鄙视死…… 类似的情况不胜枚举,由于没有上位词,俄语的表达难度无形中增加,但是也更准确了。说上位词的原因是,比如在情绪一类,中国有喜怒哀乐,由于上位词的发达(其实这里这么用未必合适,但为了方便起见),喜都是喜:惊喜、狂喜、窃喜,怒都是怒:暴怒、狂怒、怨怒,一看就很好理解。而俄语中这些词都是不一样的,就是长得区别很大!当然外语很多都是这样。比如“惊讶”吧,俄语常用的有6个,长得完全不一样,程度递增,翻译就是: 略惊讶,稍惊讶,很惊讶,特别惊讶,惊讶死了,震惊啊啊啊! 
其中第二个惊讶只能用于好事,第六个惊讶只能用于坏事。你都要背下来,不可以乱用。 7 语序 这也算是“习惯”一类吧。你们知道,由于有性数格,所以俄语中的语序没有英文中文那么重要。比如“我打你”“你打我”在中文不一样,顺序可不能乱,但在俄语中就无所谓,因为被打的那个人是变格的,所以谁被打一目了然,你想把谁放在前面都可以。当然语序可不只是这么简单,除了说话时要把 新信息放在最后 , 状语一般放两边 等等这些最基础的,正是由于俄语的语序不影响语法状态,怎么说都行,你就更不能“怎么说都行”,句子一旦长了,怎样编排他们的顺序就很有讲究,顺序说错了会给人一种“话没说完”或者别别扭扭的感觉,这个是长期训练出来的语感,没什么规律,就是多读多说。 文化这方面就极其庞杂了,而又极其重要。俄罗斯文学高度发展,几乎每个人都会背几首普希金叶赛宁的诗,要学俄语这些都是必须。人家经常说着说着“我们的诗人那曾经忧郁的眼神!”,那是说普希金呢;“像塔尼娅一样纯真的爱恋!”那是说《叶甫盖尼奥涅金》呢。跟英国人说啥都喜欢引圣经一个道理。当然俄罗斯文学不止普希金,他们的教育普及程度很高,人们说话喜欢引经据典,尤喜诗歌,所以基础的要知道。 俄罗斯也是一个十分有“传统”的国家,像中国一样。人们说话喜欢引用谚语俗语,而且谚语俗语很大程度上体现这个国家的一些精神。比如俄罗斯对“家国”的热爱,对“入侵”的反感,对“朋友”的珍视,都可以在谚语上体现出来,如果学俄语的话是一定要背的。 比如有句谚语:“ 不请自来的客人比鞑靼人还恶劣 !”(参见俄历史上蒙古鞑靼人入侵并统治240年) 他们喜欢熊,所以就有“ 熊在森林就好比主人在家 ”,认为熊聪明敏锐,守护家园,且憨态可掬。(熊真的对俄罗斯人十分重要,他们喜欢熊就像我们见到大熊猫就叫“好萌”是一个道理) 包括苏联那段历史,他们也会经常调侃,相关的谚语和俗语也很多。 先写这么多,吃饭去了。 =================================== 应 来补充,关于词源,没有具体研究过,还是请专业人士再细述一下吧。 据我所知的,俄语是 源于古斯拉夫语,尤其是东斯拉夫语 ,后来是教会斯拉夫语,规范了一部分古俄语,但那时候的俄语语法混乱,用词不准,真正 从普希金开始 ,是由他规范了“ 现代俄罗斯标准语 ”这也基本上是现在的俄语。 跟德语的渊源应该没有那么大的,当然印欧语系互相之间自然都有千丝万缕的联系,但就我所知对俄语的影响来说,德语肯定排不上。 希腊语倒是有联系的。俄语字母最开始的起源是 基里尔字母 ,就是来源于希腊语字母。 英语的影响如今确实越来越大了, 英语词汇入侵得厉害 ,不仅一些新兴词汇大多用英语代替,就连一些原本俄语用的,也弃之不用,用比较大众化的英语词汇。比如“志愿者”,俄语现在常用的волентёр就是根据volunteer变化而来,读音也差不多。现在这种词极多,已经蔓延在了各个领域。多出现在新兴事物上。但事实上,俄语本身有很多词的读音和英语就很像的——学小语种的人应该都有体会——这倒不全是英语的影响,只是英语和欧洲许多语言系出同源,这里不一定是互相影响。 Современные лексические и грамматические черты русского языка — результат длительного взаимодействия различных восточнославянских диалектов, распространённых на великорусской территории, и церковнославянского языка, возникшего в результате адаптации на русской почве языка первых христианских книг IX—XI вв. («старославянского языка»). 以上是维基百科的俄文版,大意是说:现代的俄语口语及书面语基本上是东斯拉夫语各地方言相互作用的结果,以及9~11世纪第一批教会书籍中对古俄语做了部分调整的教会斯拉夫语。 即是说,希腊语、东斯拉夫语、宗教(东正教)用语,都对俄语的形成产生影响。只是那些很古老了,许多用词也都已经被抛弃,如今的标准语是从普希金开始规范的。 我的所知基本就这些了,剩下等更专业的人来解答吧。", "meta": {"Prob":0.38267436731076065}} + {"id": 92661, "text": "你去过的哪些博物馆特别吸引你? 初步浏览了大神们的答案,我就简单阐述一下我去过的四个国内博物馆,上海博物馆、国家博物馆、南京博物院和苏州博物馆。 上海博物馆让我最震撼的是青铜器和字画,尤其是青铜器,基本上把每个青铜器都拍了下来。最喜欢的青铜器是编钟,就安静的陈列在那里,想到千年前有人敲响它,演奏着古朴的旋律,久久不想离开。在上博你可以看到唐寅、文征明的字画,那是用毛笔在宣纸上勾勒出的水墨线条。对了上博还有许多精美的瓷器,玉器,绝对值得一去。 国家博物馆,设计风格独特,在每个馆外也有展品。每个展馆不大,东西没有上博多。个人觉得国家博物馆最大的特色就是有许多国外展厅,包括非洲,俄罗斯,还有各国赠与中国领导人的纪念品。 南京博物院,印象不深,去的那天下雨,展品也很丰富,但是基本没印象了。印象最深的是有一条“仿古街”,还原了大概民国时的街道。南京博物院有个科技展区还挺好玩的。 苏州博物馆,值得一看的是场馆设计,藏品在看完上博后就没什么意思啦。不过离拙政园很近,可以顺便去看看。", "meta": {"Prob":0.31295625980305586}} + ``` + + - Excluded Data + ```json lines + // 'text': #input text , 'prob': probability of being low quality + {"id": 904009, "text": "如何正确看待三生三世十里桃花电视剧打击盗版?首先,三生三世十里桃花这部剧说打击盗版这个行为是正确的,维护自己的版权。但是三生三世十里桃花是抄袭网络某耽美作者的啊,所以说抄耽就不算抄袭?原谅贫道孤陋寡闻,本身就是抄袭的现在你在这里告诉我你要打击盗版?!完全就是贼喊捉贼吧,哈哈哈哈哈哈这笑话我能笑一年听说现在要出电影了?和华胥引一起?希望不会撞车哈哈哈哈哈哈", "type": "qa", "source_dataset": "wg_batch2_zhihu", "title": "", "meta": {"prob": 0.8741011765270877}} + {"id": 904011, "text": "怎么样证明自己不是渣男?娶她。","meta": {"prob": 0.5239115483615828}} + ``` + - Classfier: Bag-of-Words-based Ridge Classifier + - Bag-of-Words: Each word in the text is represented as a one-hot encoded vector. + - Ridge Classifier: A Logistic Classifier with an L2 Norm, which uses L2 regularization to constrain the size of the model's regression coefficients, thereby enhancing the model's generalization ability and stability in a linear classification model. 
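+
+The classifier described above can be reproduced in broad strokes with scikit-learn. The sketch below is an illustration under stated assumptions, not the repository's exact implementation: it uses `CountVectorizer` for the bag of words (character n-grams as a simple way to handle Chinese without a word tokenizer) and an L2-regularized `LogisticRegression` in place of the ridge classifier; the file names and the 0.5 threshold are hypothetical, while the `text`/`label`/`prob` fields follow the examples above.
+
+```python
+import json
+
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.linear_model import LogisticRegression  # logistic classifier with an L2 penalty
+
+
+def load_jsonl(path):
+    with open(path, encoding="utf-8") as f:
+        return [json.loads(line) for line in f if line.strip()]
+
+
+# Labeled data: {"text": ..., "label": "0"/"1"}, 1 indicating low quality (see examples above).
+train = load_jsonl("qa_labeled.jsonl")        # hypothetical path
+candidates = load_jsonl("qa_to_clean.jsonl")  # hypothetical path
+
+# Bag of words over character n-grams (an assumption; any tokenization scheme would do).
+vectorizer = CountVectorizer(analyzer="char_wb", ngram_range=(1, 2), max_features=100000)
+X = vectorizer.fit_transform([d["text"] for d in train])
+y = [int(d["label"]) for d in train]
+
+clf = LogisticRegression(penalty="l2", max_iter=1000)
+clf.fit(X, y)
+
+# Probability of being low quality, stored under "meta" as in the sample output.
+probs = clf.predict_proba(vectorizer.transform([d["text"] for d in candidates]))[:, 1]
+kept, excluded = [], []
+for doc, p in zip(candidates, probs):
+    doc.setdefault("meta", {})["prob"] = float(p)
+    (excluded if p >= 0.5 else kept).append(doc)  # 0.5 is an illustrative cut-off
+
+with open("qa_demo_output.jsonl", "w", encoding="utf-8") as f:  # hypothetical output path
+    for doc in kept:
+        f.write(json.dumps(doc, ensure_ascii=False) + "\n")
+```
+
+In practice the quality of such a filter depends mostly on the size and balance of the labeled set; the Chinese version of this document suggests labeling at least about 10,000 examples before relying on the scores.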
\ No newline at end of file diff --git a/flagdata/cleaner/docs/Qa_Cleaner_ZH.md b/flagdata/cleaner/docs/Qa_Cleaner_ZH.md new file mode 100644 index 0000000..f55abec --- /dev/null +++ b/flagdata/cleaner/docs/Qa_Cleaner_ZH.md @@ -0,0 +1,40 @@ +- QACleaner:利用事先标注的文本质量数据,训练分类器,以过滤低质量数据。 + - 输入: + - 标注数据: + ```json lines + //{"text": #input text, "label": 0/1, 0 for high quality, 1 indicating low quality} + //低质量:赌博/诈骗/低俗色情//营销内容 + {"text": "“酒店招女/男公关”小广告的目的是什么? 帮我改了标题之后似乎把我的原意给模糊了,我是想问:小广告重点是想“招聘女职员”,还是想吸引男人注意去消费。 本人带队直招:(不收取任何形式的押金、进场费、管理费、介绍费等等)。我们客源多而稳定生意红火,绝对不会出现几天上不到班的情况,挣钱不是梦。每个做夜场的女孩都不想东奔西走的找场子,就算日新俩三千,三天打鱼两天晒网,到头来也赚不到什么钱。都想找一个能稳定的可以挣到钱的场子和一个好的领队。如果你们也是这样想的,那么就请相信我,我这里就是你们不二的选择,如果你犹豫了错过了,那就只能怪你们自己", "label": "0"} + {"text": "如何通俗易懂地解释遗传算法?有什么例子? 大三软件工程学生,以前只听过遗传算法这个名字,但是真正是怎么一回事没有了解过。今天刚好看到 的回答,想起这学期正好选了人工智能这堂课,就觉得想试着码一下这个算法,算是提前预习一下。于是花了两个小时把这个算法大概搞懂了,把思路写一遍,也算是自己再熟悉一遍(如果哪里搞错了请大神们轻喷 理解这个算法首先要理解一些术语。下图(来自 Genetic Algorithms Fundamentals )把术语之间的关系表示的很清楚。 遗传算法就是通过不断地进化,将种群里面我们最想要的染色体保留下来。进化多次之后,种群里的大部分染色体都会是比较优势的染色体(我们想要的解),所以我们可以通过这个算法获取多个较优解。 ps: 关于基因和等位基因的区别:基因(gene)是指染色体上的特定位置,而等位基因(allele)则是当前染色体在该基因处的值。 知道一些术语之间的关系之后,可以试着尝试搞懂算法了。拿@sjyan刚刚这道题做例子。 求解函数 f(x) = x + 10*sin(5*x) + 7*cos(4*x) 在区间[0,9]的最大值。 用遗传算法解这道题的过程, 他说得很清楚,主要是三个阶段: 初始化阶段 确定染色体的形式。先选择一种方式对x进行编码,使其从实际的解空间(phenotype space)被映射到编码空间(genotype space),也就是把实数x变成一条染色体。在这道题中我沿用了@sjyan的编码方式,即把解空间划分为 2^{17}-1 等份,然后通过一个17个bit的染色体来表达解空间的实数值。 确定好染色体形式之后,我们便可以拿它生成一个初始的种群。 进化迭代阶段 接下来会进行不停地进化迭代,每次迭代主要由三个阶段组成:选择、交叉、变异。 选择阶段。选择阶段经历了适应性选择和随机选择。在适应性选择中,我们通过适应性函数(fitness function)对种群中的每一条染色体进行适应性评估,按评估结果对染色体进行排序。筛选出适应性最好的一定数量(可以通过参数调节)的染色体,作为下一代的父母加入存货列表。而在随机选择中,我们会随机挑选一些没有通过适应性选择的个体也加入存活列表,这样做是为了使得一些拥有潜在价值基因但适应性很差的个体得以生存下来。 交叉阶段。每一代染色体的数量是一定的,我们淘汰了一部分染色体,就要生成新的染色体来补足空缺。从上一代中,我们保留了一部分存活的染色体,它们之间将会进行交叉。交叉是指随机从存活列表中抽取两个染色体,将这两条染色体进行融合从而生成新的染色体(就是取一部分父染色体的基因,再在母染色体取在父染色体没有取到的基因,把这些基因合成一条新的染色体),把新的染色体加入种群中。交叉操作会一直持续,直到种群数量跟之前的种群数量相同。 变异阶段。对于种群中的每一条染色体,使其一定几率地发生随机变异(在这个例子下就是反转染色体上某一个bit的值)。 验收阶段 经过很多代的进化之后,种群里面的染色体基本上符合最优化的要求了。这时就可以去对里面的染色体进行解码(decode),将其转化为实际的解。 python实现 代码写的挺渣的,不过标了很多注释。 #encoding=utf-8import mathimport randomimport operatorclass GA(): def __init__(self, length, count): # 染色体长度 self.length = length # 种群中的染色体数量 self.count = count # 随机生成初始种群 self.population = self.gen_population(length, count) def evolve(self, retain_rate=0.2, random_select_rate=0.5, mutation_rate=0.01): \"\"\" 进化 对当前一代种群依次进行选择、交叉并生成新一代种群,然后对新一代种群进行变异 \"\"\" parents = self.selection(retain_rate, random_select_rate) self.crossover(parents) self.mutation(mutation_rate) def gen_chromosome(self, length): \"\"\" 随机生成长度为length的染色体,每个基因的取值是0或1 这里用一个bit表示一个基因 \"\"\" chromosome = 0 for i in xrange(length): chromosome |= (1 << i) * random.randint(0, 1) return chromosome def gen_population(self, length, count): \"\"\" 获取初始种群(一个含有count个长度为length的染色体的列表) \"\"\" return [self.gen_chromosome(length) for i in xrange(count)] def fitness(self, chromosome): \"\"\" 计算适应度,将染色体解码为0~9之间数字,代入函数计算 因为是求最大值,所以数值越大,适应度越高 \"\"\" x = self.decode(chromosome) return x + 10*math.sin(5*x) + 7*math.cos(4*x) def selection(self, retain_rate, random_select_rate): \"\"\" 选择 先对适应度从大到小排序,选出存活的染色体 再进行随机选择,选出适应度虽然小,但是幸存下来的个体 \"\"\" # 对适应度从大到小进行排序 graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] # 选出适应性强的染色体 retain_length = int(len(graded) * retain_rate) parents = graded[:retain_length] # 选出适应性不强,但是幸存的染色体 for chromosome in graded[retain_length:]: if random.random() < random_select_rate: parents.append(chromosome) return 
parents def crossover(self, parents): \"\"\" 染色体的交叉、繁殖,生成新一代的种群 \"\"\" # 新出生的孩子,最终会被加入存活下来的父母之中,形成新一代的种群。 children = [] # 需要繁殖的孩子的量 target_count = len(self.population) - len(parents) # 开始根据需要的量进行繁殖 while len(children) < target_count: male = random.randint(0, len(parents)-1) female = random.randint(0, len(parents)-1) if male != female: # 随机选取交叉点 cross_pos = random.randint(0, self.length) # 生成掩码,方便位操作 mask = 0 for i in xrange(cross_pos): mask |= (1 << i) male = parents[male] female = parents[female] # 孩子将获得父亲在交叉点前的基因和母亲在交叉点后(包括交叉点)的基因 child = ((male & mask) | (female & ~mask)) & ((1 << self.length) - 1) children.append(child) # 经过繁殖后,孩子和父母的数量与原始种群数量相等,在这里可以更新种群。 self.population = parents + children def mutation(self, rate): \"\"\" 变异 对种群中的所有个体,随机改变某个个体中的某个基因 \"\"\" for i in xrange(len(self.population)): if random.random() < rate: j = random.randint(0, self.length-1) self.population[i] ^= 1 << j def decode(self, chromosome): \"\"\" 解码染色体,将二进制转化为属于[0, 9]的实数 \"\"\" return chromosome * 9.0 / (2**self.length-1) def result(self): \"\"\" 获得当前代的最优值,这里取的是函数取最大值时x的值。 \"\"\" graded = [(self.fitness(chromosome), chromosome) for chromosome in self.population] graded = [x[1] for x in sorted(graded, reverse=True)] return ga.decode(graded[0]) if __name__ == '__main__': # 染色体长度为17, 种群数量为300 ga = GA(17, 300) # 200次进化迭代 for x in xrange(200): ga.evolve() print ga.result() 简陋的)运行结果,很接近 总结 遗传算法可以产出一组相对较优的解,而且不需要根据具体问题去进行过多的逻辑推演,速度也相对较快。缺点就是不能保证解是最优的。", "label": "1"} + {"text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)? 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。 覆盖率高,当然好,但工程类的东西往往需要妥协和平衡,时间不够时,先测什么,后测什么,就更重要一些了。", "label": "1"} + //低质量:信息量低//口语化内容 + {"text": "有车是怎样的体验? 生活质量有提高。也有降低。 可以想去哪去哪,不开心就转一圈。 降低也有很多。刚毕业不久,工作原因买了个商务车。油耗保养什么的挤占你的工资。还有就是,朋友之间的,一个电话,你可能就要跑很久接送人,比较不开心。 最特么重要的是,当你看见莫名其妙的罚款单的时候。 索性没人认识。不打码了。答案千万别火啊。", "label": "0"} + //…… + ``` + - 待清洗数据 + ```json lines + {"id": 904009, "text": "如何正确看待三生三世十里桃花电视剧打击盗版?首先,三生三世十里桃花这部剧说打击盗版这个行为是正确的,维护自己的版权。但是三生三世十里桃花是抄袭网络某耽美作者的啊,所以说抄耽就不算抄袭?原谅贫道孤陋寡闻,本身就是抄袭的现在你在这里告诉我你要打击盗版?!完全就是贼喊捉贼吧,哈哈哈哈哈哈这笑话我能笑一年听说现在要出电影了?和华胥引一起?希望不会撞车哈哈哈哈哈哈", "type": "qa", "source_dataset": "wg_batch2_zhihu"} + {"id": 904011, "text": "怎么样证明自己不是渣男?娶她。"} + //…… + {"id": 3330999, "text": "哪些毕业论文的「致谢」部分让你印象深刻? 
致谢 时近仲夏,岁在乙未,梧桐树下,清风明月,闲庭信步,回思往事,处处感怀。余十有八入长大,今已七年有余,入校所植之树,今已蔚然成荫矣,叹七载寒窗仅须臾耳,余多可喜多有有憾。 喜者有二,曰路漫漫而有爱,曰上下求索而有得。 有爱者,曰养育恩,曰师生情,曰鹣鲽意,曰同窗谊。 吾跪而叩谢者一曰养育恩。吾本草芥寒门,得有今日之成,皆赖椿萱。家慈教子以严,温良恭俭,家严襟怀洒落,博学多才,尤擅属文。暌隔庭闱,瞬已数载,椿萱衰迈,松菊萧条,每念及兹,余心怆然,唯朝乾夕惕,以报得万一。 吾跪而叩谢者二曰师生情。初,余师从周仁老师,吾才薄识短,囿于章句之学,不谙处世之道,恩师言传身教,耳提面命。晨夕讲授,通宵达旦,不达则不休,必通彻乃已。吾师徒二人尝因公三昼夜不眠不休于兰,盖因推敲二字。余性散漫,恩师尝教之曰:“天下事,果能坚韧不懈,总有可志竟成。志之所向,金石为开,谁能御之?”初吾常思应酬周到,恩师常诲之曰:“大抵任事之人,断不能有毁而无誉,有恩而无怨,不求人人应酬,事事周到,但求问心无愧,知恩图报。”陶冶变化,余性亦有所改。恩师用心之深,用力之勤,无人可及,愿承欢膝下,竭吾之能,以孝恩师,以寸草之心报三春之晖。 吾师鲁师,实乃真学者也,学问精深,品格方正。吾毕业论文开题之际,恰鲁师赴美深造,然其心系学生,吾之论文承蒙鲁师反复开说,屡次跨洋与吾讲说至数刻之久,先生以苦口滴杜鹃之血,增删五次,六易吾稿。余初做学问,困心衡虑,不得其法,胸有千言,或笔下无字,或洋洋洒洒,不得要领,先生言之切切,微言大义,专从学理上用力,不厌其烦,听之,昭然若发蒙也。余之理法才气俱进,胸中之意,今可达腕下。余本才短又性浮嚣,好用小智小术,鲁师常诲之曰:“天下之至拙,能胜天下之至巧。读书立志,须以困勉之功,锱铢积累,则终可达,浮光掠影,终不济事。读书困时切莫放弃,熬过此关,便可少进,日日精进,自有通达之时。”又曰:“自古成名者,多由笨干,天下事,最怕认真二字,凡事认真,终得其报。”三度春秋,鱼渔双授,先生于此,一生永铭。 三曰鹣鲽意。吾爱志伟,伴我寒窗,容余任性,许吾安稳,乃吾铠甲亦吾软肋,路遥遥,愿与君偕行。 四曰同窗谊。竹马之交霍然,共战人生数役,同享生活之乐,互为莫逆,情同手足;友迪,与交七载,相与分忧,互为解难,受益匪浅;友村,居住同室,切磋学艺,同窗情深;友恒大,胸怀锦绣,虽为新交亦引为知己;师兄张宏东、任玉龙、师姐海兰于吾亦有垂教之恩。得友如斯,幸甚至哉。 恩长笔短,吾之述不足以表其万一,寥寥数笔,仅余区区之意也。 有得者,曰知研究之真味,曰体求知之乐趣。书山浩浩,学海淼淼,今有寸得,如沧海一粟,余者吾将毕生求索。 憾者有二, 曰制之无节,曰行之无恒。此二者吾之所短也,余亦深以此自愧。时忆求学事,本因用功之时,常常玩偈,致百不遂志,精神散漫,愧悔憧扰,不能摆脱。人生之大憾非不能也,盖本可以也。语不云乎:往者不可谏,来者犹可追。自今以始,吾必不得自逸,使恒心常在。 伤离别,具表文,余心惶惶然。愿师长安康,同窗如意,再拜稽首。 乙未仲夏于博士楼", "meta": {}} + {"id": 15134791, "text": "都说俄语难,请问俄语究竟难在什么地方? 字母发音这些请勿赘述,因为我觉得凡是打算学一门语言的人总会对这些有心理准备。我想了解俄语语法和词汇方面的难点。还有词源什么的。如果可以的话请对照着中文和英文这两种一般人有了解的语言来说明。 没想到这么多赞同,过来继续补充一些 ======================================= 俄语小本来凑热闹,学得不精,只是少有看到和自己语言有关的忍不住。 如果初学的话,最先接触的肯定是 1 名词性数格 这些在英文中是不存在的,但是很多印欧语系的语言中都有,所以并不特殊,只能说英语极度简化了。名词有3个性,单复数,6个格。 2 动词变位 动词有六个变位,按我们老师的话说,俄语的动词原形其实记了没啥用的,基本用不着。这话夸张了,但可以想见,大多数时候看到的俄语动词都是变位形式,有的特殊变位甚至让你无法推断其原形,而且你在说每句话的时候动词不能够脱口而出,要先在脑子里根据主语及时态给它变位,所以初期很容易说得磕磕巴巴的。其实简单地说动词变位是不准确的,很多人只看到动词的六个变位,事实上这个只要背下来常运用总会熟练的,动词的复杂程度远远不止于此。俄语的动词十分发达,最好的体现在动词前缀上。前缀类型有几十种。同样的词源,加不同的前缀意思就完全不同,甚至南辕北辙,而同样一个动词本身的意思也极其复杂,我总觉得比英语复杂,一个动词有本义引申义经常不少于10种,太常见了,总是容易弄混。 3 上述都算是语法吧,包括 形动词、副动词、无人称句 等等,都是其他语言不存在的语言现象(经知友 提醒西班牙语有副动词,更正),需要一阵的理解钻研,但语法这种东西,一旦理解了就可以举一反三,其实并不是高不可攀的。( 这里补充一点无人称句,为什么列出来,因为我们最熟悉的外语——英语中不存在无人称句。英语有It is raining. 这种句子,中文翻译或者隐含意是“天气”,天下雨了,英文中也是有个形式主语it的,即使没有真正主语,英文中一定有个形式主语,可俄语的无人称句连形式主语都没有,就是没有主语。比如下雨了,“下”这个词会是动词,“雨”变成宾语,没有任何主语。 初学者肯定容易不好理解,但都是可以攻克的难关,像前面说的,一旦懂了就是懂了。) 4 还有许多“ 习惯 ”问题。语言这东西很玄妙。总的说来就是“我们习惯这么说而不那么说”。比如用词。词义辨析是很大一块冰山。同样意思的词有五六个,他们可以是语体的区别(公文语体、官方语、口语等),可以使表达情绪的强烈不同,可以是语境不同、可以是不同的内涵和外延。在这里举个例加强理解: 俄语中常用的“有助于”有三个 (只是常用),这三个要这么区分: 一个是正式语体中用,一个是你全权帮助协助(比如你帮助妹妹学习),一个是你的帮助是一部分因素,比如好的天气有助于运动会的开展。以上不可以混用。这么一看英语的help是不是太可爱了! (这个例子是才接触过所以有印象,其实不算什么,远远不能体现俄语的变态……)总之区别或大或小,你都要记,用错了词,即使意思一样,听起来也很奇怪,会让俄罗斯人不理解。 这个话题说起来太大了,包括文化问题映射在语言上等,先写这么多,有空再来更。 =================================== 应 来补充 感谢各位赞同~ 5 俄罗斯的语气词也很让人头疼。当然如果只是想粗略地学,仅仅和人交流的层面,语气词学不通无伤大雅,大不了不用么!只不过你不会用语气词,你跟人讲话、发邮件就好像是新闻联播一样,这些“小词儿”,数量多,但最关键的是一个词有很多种语气……像же、уж之类,不同语境下意思迥异,尤其在读文学作品,在揣摩和你对话的人的言下之意(比如我只考了80分和我考了80分呢 前者你要安慰后者你要恭喜 搞错了可咋整)上用处极大不可大意。 6 还能想到的俄语比中文发达的一个类别是 情绪、表情 。很多词甚至没有办法找到相应的中文对应。而且中文有一点很好,就是它的“ 上位词 ”很发达。比如说 小说统称为小说,讲起来很方便,不管你写的哪类,总归是小说,我不会说错。但俄语中没有“小说”这个词,只有长篇小说,中篇小说,短篇小说,科幻小说,爱情小说等等……说之前必须要了解才能张口。否则就会犯“前两天看了短篇小说《安娜卡列尼娜》”这种错误,一定要背清楚啊不然会被鄙视死…… 类似的情况不胜枚举,由于没有上位词,俄语的表达难度无形中增加,但是也更准确了。说上位词的原因是,比如在情绪一类,中国有喜怒哀乐,由于上位词的发达(其实这里这么用未必合适,但为了方便起见),喜都是喜:惊喜、狂喜、窃喜,怒都是怒:暴怒、狂怒、怨怒,一看就很好理解。而俄语中这些词都是不一样的,就是长得区别很大!当然外语很多都是这样。比如“惊讶”吧,俄语常用的有6个,长得完全不一样,程度递增,翻译就是: 略惊讶,稍惊讶,很惊讶,特别惊讶,惊讶死了,震惊啊啊啊! 
其中第二个惊讶只能用于好事,第六个惊讶只能用于坏事。你都要背下来,不可以乱用。 7 语序 这也算是“习惯”一类吧。你们知道,由于有性数格,所以俄语中的语序没有英文中文那么重要。比如“我打你”“你打我”在中文不一样,顺序可不能乱,但在俄语中就无所谓,因为被打的那个人是变格的,所以谁被打一目了然,你想把谁放在前面都可以。当然语序可不只是这么简单,除了说话时要把 新信息放在最后 , 状语一般放两边 等等这些最基础的,正是由于俄语的语序不影响语法状态,怎么说都行,你就更不能“怎么说都行”,句子一旦长了,怎样编排他们的顺序就很有讲究,顺序说错了会给人一种“话没说完”或者别别扭扭的感觉,这个是长期训练出来的语感,没什么规律,就是多读多说。 文化这方面就极其庞杂了,而又极其重要。俄罗斯文学高度发展,几乎每个人都会背几首普希金叶赛宁的诗,要学俄语这些都是必须。人家经常说着说着“我们的诗人那曾经忧郁的眼神!”,那是说普希金呢;“像塔尼娅一样纯真的爱恋!”那是说《叶甫盖尼奥涅金》呢。跟英国人说啥都喜欢引圣经一个道理。当然俄罗斯文学不止普希金,他们的教育普及程度很高,人们说话喜欢引经据典,尤喜诗歌,所以基础的要知道。 俄罗斯也是一个十分有“传统”的国家,像中国一样。人们说话喜欢引用谚语俗语,而且谚语俗语很大程度上体现这个国家的一些精神。比如俄罗斯对“家国”的热爱,对“入侵”的反感,对“朋友”的珍视,都可以在谚语上体现出来,如果学俄语的话是一定要背的。 比如有句谚语:“ 不请自来的客人比鞑靼人还恶劣 !”(参见俄历史上蒙古鞑靼人入侵并统治240年) 他们喜欢熊,所以就有“ 熊在森林就好比主人在家 ”,认为熊聪明敏锐,守护家园,且憨态可掬。(熊真的对俄罗斯人十分重要,他们喜欢熊就像我们见到大熊猫就叫“好萌”是一个道理) 包括苏联那段历史,他们也会经常调侃,相关的谚语和俗语也很多。 先写这么多,吃饭去了。 =================================== 应 来补充,关于词源,没有具体研究过,还是请专业人士再细述一下吧。 据我所知的,俄语是 源于古斯拉夫语,尤其是东斯拉夫语 ,后来是教会斯拉夫语,规范了一部分古俄语,但那时候的俄语语法混乱,用词不准,真正 从普希金开始 ,是由他规范了“ 现代俄罗斯标准语 ”这也基本上是现在的俄语。 跟德语的渊源应该没有那么大的,当然印欧语系互相之间自然都有千丝万缕的联系,但就我所知对俄语的影响来说,德语肯定排不上。 希腊语倒是有联系的。俄语字母最开始的起源是 基里尔字母 ,就是来源于希腊语字母。 英语的影响如今确实越来越大了, 英语词汇入侵得厉害 ,不仅一些新兴词汇大多用英语代替,就连一些原本俄语用的,也弃之不用,用比较大众化的英语词汇。比如“志愿者”,俄语现在常用的волентёр就是根据volunteer变化而来,读音也差不多。现在这种词极多,已经蔓延在了各个领域。多出现在新兴事物上。但事实上,俄语本身有很多词的读音和英语就很像的——学小语种的人应该都有体会——这倒不全是英语的影响,只是英语和欧洲许多语言系出同源,这里不一定是互相影响。 Современные лексические и грамматические черты русского языка — результат длительного взаимодействия различных восточнославянских диалектов, распространённых на великорусской территории, и церковнославянского языка, возникшего в результате адаптации на русской почве языка первых христианских книг IX—XI вв. («старославянского языка»). 以上是维基百科的俄文版,大意是说:现代的俄语口语及书面语基本上是东斯拉夫语各地方言相互作用的结果,以及9~11世纪第一批教会书籍中对古俄语做了部分调整的教会斯拉夫语。 即是说,希腊语、东斯拉夫语、宗教(东正教)用语,都对俄语的形成产生影响。只是那些很古老了,许多用词也都已经被抛弃,如今的标准语是从普希金开始规范的。 我的所知基本就这些了,剩下等更专业的人来解答吧。"} + {"id": 92661, "text": "你去过的哪些博物馆特别吸引你? 初步浏览了大神们的答案,我就简单阐述一下我去过的四个国内博物馆,上海博物馆、国家博物馆、南京博物院和苏州博物馆。 上海博物馆让我最震撼的是青铜器和字画,尤其是青铜器,基本上把每个青铜器都拍了下来。最喜欢的青铜器是编钟,就安静的陈列在那里,想到千年前有人敲响它,演奏着古朴的旋律,久久不想离开。在上博你可以看到唐寅、文征明的字画,那是用毛笔在宣纸上勾勒出的水墨线条。对了上博还有许多精美的瓷器,玉器,绝对值得一去。 国家博物馆,设计风格独特,在每个馆外也有展品。每个展馆不大,东西没有上博多。个人觉得国家博物馆最大的特色就是有许多国外展厅,包括非洲,俄罗斯,还有各国赠与中国领导人的纪念品。 南京博物院,印象不深,去的那天下雨,展品也很丰富,但是基本没印象了。印象最深的是有一条“仿古街”,还原了大概民国时的街道。南京博物院有个科技展区还挺好玩的。 苏州博物馆,值得一看的是场馆设计,藏品在看完上博后就没什么意思啦。不过离拙政园很近,可以顺便去看看。"} + ``` + - 输出: + - 已清洗数据 + ```json lines + // 'text': #input text , 'prob': probability of being low quality + {"id": 3330999, "text": "哪些毕业论文的「致谢」部分让你印象深刻? 
致谢 时近仲夏,岁在乙未,梧桐树下,清风明月,闲庭信步,回思往事,处处感怀。余十有八入长大,今已七年有余,入校所植之树,今已蔚然成荫矣,叹七载寒窗仅须臾耳,余多可喜多有有憾。 喜者有二,曰路漫漫而有爱,曰上下求索而有得。 有爱者,曰养育恩,曰师生情,曰鹣鲽意,曰同窗谊。 吾跪而叩谢者一曰养育恩。吾本草芥寒门,得有今日之成,皆赖椿萱。家慈教子以严,温良恭俭,家严襟怀洒落,博学多才,尤擅属文。暌隔庭闱,瞬已数载,椿萱衰迈,松菊萧条,每念及兹,余心怆然,唯朝乾夕惕,以报得万一。 吾跪而叩谢者二曰师生情。初,余师从周仁老师,吾才薄识短,囿于章句之学,不谙处世之道,恩师言传身教,耳提面命。晨夕讲授,通宵达旦,不达则不休,必通彻乃已。吾师徒二人尝因公三昼夜不眠不休于兰,盖因推敲二字。余性散漫,恩师尝教之曰:“天下事,果能坚韧不懈,总有可志竟成。志之所向,金石为开,谁能御之?”初吾常思应酬周到,恩师常诲之曰:“大抵任事之人,断不能有毁而无誉,有恩而无怨,不求人人应酬,事事周到,但求问心无愧,知恩图报。”陶冶变化,余性亦有所改。恩师用心之深,用力之勤,无人可及,愿承欢膝下,竭吾之能,以孝恩师,以寸草之心报三春之晖。 吾师鲁师,实乃真学者也,学问精深,品格方正。吾毕业论文开题之际,恰鲁师赴美深造,然其心系学生,吾之论文承蒙鲁师反复开说,屡次跨洋与吾讲说至数刻之久,先生以苦口滴杜鹃之血,增删五次,六易吾稿。余初做学问,困心衡虑,不得其法,胸有千言,或笔下无字,或洋洋洒洒,不得要领,先生言之切切,微言大义,专从学理上用力,不厌其烦,听之,昭然若发蒙也。余之理法才气俱进,胸中之意,今可达腕下。余本才短又性浮嚣,好用小智小术,鲁师常诲之曰:“天下之至拙,能胜天下之至巧。读书立志,须以困勉之功,锱铢积累,则终可达,浮光掠影,终不济事。读书困时切莫放弃,熬过此关,便可少进,日日精进,自有通达之时。”又曰:“自古成名者,多由笨干,天下事,最怕认真二字,凡事认真,终得其报。”三度春秋,鱼渔双授,先生于此,一生永铭。 三曰鹣鲽意。吾爱志伟,伴我寒窗,容余任性,许吾安稳,乃吾铠甲亦吾软肋,路遥遥,愿与君偕行。 四曰同窗谊。竹马之交霍然,共战人生数役,同享生活之乐,互为莫逆,情同手足;友迪,与交七载,相与分忧,互为解难,受益匪浅;友村,居住同室,切磋学艺,同窗情深;友恒大,胸怀锦绣,虽为新交亦引为知己;师兄张宏东、任玉龙、师姐海兰于吾亦有垂教之恩。得友如斯,幸甚至哉。 恩长笔短,吾之述不足以表其万一,寥寥数笔,仅余区区之意也。 有得者,曰知研究之真味,曰体求知之乐趣。书山浩浩,学海淼淼,今有寸得,如沧海一粟,余者吾将毕生求索。 憾者有二, 曰制之无节,曰行之无恒。此二者吾之所短也,余亦深以此自愧。时忆求学事,本因用功之时,常常玩偈,致百不遂志,精神散漫,愧悔憧扰,不能摆脱。人生之大憾非不能也,盖本可以也。语不云乎:往者不可谏,来者犹可追。自今以始,吾必不得自逸,使恒心常在。 伤离别,具表文,余心惶惶然。愿师长安康,同窗如意,再拜稽首。 乙未仲夏于博士楼","meta": {"Prob":0.04515477493841735}} + {"id": 15134791, "text": "都说俄语难,请问俄语究竟难在什么地方? 字母发音这些请勿赘述,因为我觉得凡是打算学一门语言的人总会对这些有心理准备。我想了解俄语语法和词汇方面的难点。还有词源什么的。如果可以的话请对照着中文和英文这两种一般人有了解的语言来说明。 没想到这么多赞同,过来继续补充一些 ======================================= 俄语小本来凑热闹,学得不精,只是少有看到和自己语言有关的忍不住。 如果初学的话,最先接触的肯定是 1 名词性数格 这些在英文中是不存在的,但是很多印欧语系的语言中都有,所以并不特殊,只能说英语极度简化了。名词有3个性,单复数,6个格。 2 动词变位 动词有六个变位,按我们老师的话说,俄语的动词原形其实记了没啥用的,基本用不着。这话夸张了,但可以想见,大多数时候看到的俄语动词都是变位形式,有的特殊变位甚至让你无法推断其原形,而且你在说每句话的时候动词不能够脱口而出,要先在脑子里根据主语及时态给它变位,所以初期很容易说得磕磕巴巴的。其实简单地说动词变位是不准确的,很多人只看到动词的六个变位,事实上这个只要背下来常运用总会熟练的,动词的复杂程度远远不止于此。俄语的动词十分发达,最好的体现在动词前缀上。前缀类型有几十种。同样的词源,加不同的前缀意思就完全不同,甚至南辕北辙,而同样一个动词本身的意思也极其复杂,我总觉得比英语复杂,一个动词有本义引申义经常不少于10种,太常见了,总是容易弄混。 3 上述都算是语法吧,包括 形动词、副动词、无人称句 等等,都是其他语言不存在的语言现象(经知友 提醒西班牙语有副动词,更正),需要一阵的理解钻研,但语法这种东西,一旦理解了就可以举一反三,其实并不是高不可攀的。( 这里补充一点无人称句,为什么列出来,因为我们最熟悉的外语——英语中不存在无人称句。英语有It is raining. 这种句子,中文翻译或者隐含意是“天气”,天下雨了,英文中也是有个形式主语it的,即使没有真正主语,英文中一定有个形式主语,可俄语的无人称句连形式主语都没有,就是没有主语。比如下雨了,“下”这个词会是动词,“雨”变成宾语,没有任何主语。 初学者肯定容易不好理解,但都是可以攻克的难关,像前面说的,一旦懂了就是懂了。) 4 还有许多“ 习惯 ”问题。语言这东西很玄妙。总的说来就是“我们习惯这么说而不那么说”。比如用词。词义辨析是很大一块冰山。同样意思的词有五六个,他们可以是语体的区别(公文语体、官方语、口语等),可以使表达情绪的强烈不同,可以是语境不同、可以是不同的内涵和外延。在这里举个例加强理解: 俄语中常用的“有助于”有三个 (只是常用),这三个要这么区分: 一个是正式语体中用,一个是你全权帮助协助(比如你帮助妹妹学习),一个是你的帮助是一部分因素,比如好的天气有助于运动会的开展。以上不可以混用。这么一看英语的help是不是太可爱了! (这个例子是才接触过所以有印象,其实不算什么,远远不能体现俄语的变态……)总之区别或大或小,你都要记,用错了词,即使意思一样,听起来也很奇怪,会让俄罗斯人不理解。 这个话题说起来太大了,包括文化问题映射在语言上等,先写这么多,有空再来更。 =================================== 应 来补充 感谢各位赞同~ 5 俄罗斯的语气词也很让人头疼。当然如果只是想粗略地学,仅仅和人交流的层面,语气词学不通无伤大雅,大不了不用么!只不过你不会用语气词,你跟人讲话、发邮件就好像是新闻联播一样,这些“小词儿”,数量多,但最关键的是一个词有很多种语气……像же、уж之类,不同语境下意思迥异,尤其在读文学作品,在揣摩和你对话的人的言下之意(比如我只考了80分和我考了80分呢 前者你要安慰后者你要恭喜 搞错了可咋整)上用处极大不可大意。 6 还能想到的俄语比中文发达的一个类别是 情绪、表情 。很多词甚至没有办法找到相应的中文对应。而且中文有一点很好,就是它的“ 上位词 ”很发达。比如说 小说统称为小说,讲起来很方便,不管你写的哪类,总归是小说,我不会说错。但俄语中没有“小说”这个词,只有长篇小说,中篇小说,短篇小说,科幻小说,爱情小说等等……说之前必须要了解才能张口。否则就会犯“前两天看了短篇小说《安娜卡列尼娜》”这种错误,一定要背清楚啊不然会被鄙视死…… 类似的情况不胜枚举,由于没有上位词,俄语的表达难度无形中增加,但是也更准确了。说上位词的原因是,比如在情绪一类,中国有喜怒哀乐,由于上位词的发达(其实这里这么用未必合适,但为了方便起见),喜都是喜:惊喜、狂喜、窃喜,怒都是怒:暴怒、狂怒、怨怒,一看就很好理解。而俄语中这些词都是不一样的,就是长得区别很大!当然外语很多都是这样。比如“惊讶”吧,俄语常用的有6个,长得完全不一样,程度递增,翻译就是: 略惊讶,稍惊讶,很惊讶,特别惊讶,惊讶死了,震惊啊啊啊! 
其中第二个惊讶只能用于好事,第六个惊讶只能用于坏事。你都要背下来,不可以乱用。 7 语序 这也算是“习惯”一类吧。你们知道,由于有性数格,所以俄语中的语序没有英文中文那么重要。比如“我打你”“你打我”在中文不一样,顺序可不能乱,但在俄语中就无所谓,因为被打的那个人是变格的,所以谁被打一目了然,你想把谁放在前面都可以。当然语序可不只是这么简单,除了说话时要把 新信息放在最后 , 状语一般放两边 等等这些最基础的,正是由于俄语的语序不影响语法状态,怎么说都行,你就更不能“怎么说都行”,句子一旦长了,怎样编排他们的顺序就很有讲究,顺序说错了会给人一种“话没说完”或者别别扭扭的感觉,这个是长期训练出来的语感,没什么规律,就是多读多说。 文化这方面就极其庞杂了,而又极其重要。俄罗斯文学高度发展,几乎每个人都会背几首普希金叶赛宁的诗,要学俄语这些都是必须。人家经常说着说着“我们的诗人那曾经忧郁的眼神!”,那是说普希金呢;“像塔尼娅一样纯真的爱恋!”那是说《叶甫盖尼奥涅金》呢。跟英国人说啥都喜欢引圣经一个道理。当然俄罗斯文学不止普希金,他们的教育普及程度很高,人们说话喜欢引经据典,尤喜诗歌,所以基础的要知道。 俄罗斯也是一个十分有“传统”的国家,像中国一样。人们说话喜欢引用谚语俗语,而且谚语俗语很大程度上体现这个国家的一些精神。比如俄罗斯对“家国”的热爱,对“入侵”的反感,对“朋友”的珍视,都可以在谚语上体现出来,如果学俄语的话是一定要背的。 比如有句谚语:“ 不请自来的客人比鞑靼人还恶劣 !”(参见俄历史上蒙古鞑靼人入侵并统治240年) 他们喜欢熊,所以就有“ 熊在森林就好比主人在家 ”,认为熊聪明敏锐,守护家园,且憨态可掬。(熊真的对俄罗斯人十分重要,他们喜欢熊就像我们见到大熊猫就叫“好萌”是一个道理) 包括苏联那段历史,他们也会经常调侃,相关的谚语和俗语也很多。 先写这么多,吃饭去了。 =================================== 应 来补充,关于词源,没有具体研究过,还是请专业人士再细述一下吧。 据我所知的,俄语是 源于古斯拉夫语,尤其是东斯拉夫语 ,后来是教会斯拉夫语,规范了一部分古俄语,但那时候的俄语语法混乱,用词不准,真正 从普希金开始 ,是由他规范了“ 现代俄罗斯标准语 ”这也基本上是现在的俄语。 跟德语的渊源应该没有那么大的,当然印欧语系互相之间自然都有千丝万缕的联系,但就我所知对俄语的影响来说,德语肯定排不上。 希腊语倒是有联系的。俄语字母最开始的起源是 基里尔字母 ,就是来源于希腊语字母。 英语的影响如今确实越来越大了, 英语词汇入侵得厉害 ,不仅一些新兴词汇大多用英语代替,就连一些原本俄语用的,也弃之不用,用比较大众化的英语词汇。比如“志愿者”,俄语现在常用的волентёр就是根据volunteer变化而来,读音也差不多。现在这种词极多,已经蔓延在了各个领域。多出现在新兴事物上。但事实上,俄语本身有很多词的读音和英语就很像的——学小语种的人应该都有体会——这倒不全是英语的影响,只是英语和欧洲许多语言系出同源,这里不一定是互相影响。 Современные лексические и грамматические черты русского языка — результат длительного взаимодействия различных восточнославянских диалектов, распространённых на великорусской территории, и церковнославянского языка, возникшего в результате адаптации на русской почве языка первых христианских книг IX—XI вв. («старославянского языка»). 以上是维基百科的俄文版,大意是说:现代的俄语口语及书面语基本上是东斯拉夫语各地方言相互作用的结果,以及9~11世纪第一批教会书籍中对古俄语做了部分调整的教会斯拉夫语。 即是说,希腊语、东斯拉夫语、宗教(东正教)用语,都对俄语的形成产生影响。只是那些很古老了,许多用词也都已经被抛弃,如今的标准语是从普希金开始规范的。 我的所知基本就这些了,剩下等更专业的人来解答吧。", "meta": {"Prob":0.38267436731076065}} + {"id": 92661, "text": "你去过的哪些博物馆特别吸引你? 初步浏览了大神们的答案,我就简单阐述一下我去过的四个国内博物馆,上海博物馆、国家博物馆、南京博物院和苏州博物馆。 上海博物馆让我最震撼的是青铜器和字画,尤其是青铜器,基本上把每个青铜器都拍了下来。最喜欢的青铜器是编钟,就安静的陈列在那里,想到千年前有人敲响它,演奏着古朴的旋律,久久不想离开。在上博你可以看到唐寅、文征明的字画,那是用毛笔在宣纸上勾勒出的水墨线条。对了上博还有许多精美的瓷器,玉器,绝对值得一去。 国家博物馆,设计风格独特,在每个馆外也有展品。每个展馆不大,东西没有上博多。个人觉得国家博物馆最大的特色就是有许多国外展厅,包括非洲,俄罗斯,还有各国赠与中国领导人的纪念品。 南京博物院,印象不深,去的那天下雨,展品也很丰富,但是基本没印象了。印象最深的是有一条“仿古街”,还原了大概民国时的街道。南京博物院有个科技展区还挺好玩的。 苏州博物馆,值得一看的是场馆设计,藏品在看完上博后就没什么意思啦。不过离拙政园很近,可以顺便去看看。", "meta": {"Prob":0.31295625980305586}} + ``` + - 排除数据 + ```json lines + // 'text': #input text , 'prob': probability of being low quality + {"id": 904009, "text": "如何正确看待三生三世十里桃花电视剧打击盗版?首先,三生三世十里桃花这部剧说打击盗版这个行为是正确的,维护自己的版权。但是三生三世十里桃花是抄袭网络某耽美作者的啊,所以说抄耽就不算抄袭?原谅贫道孤陋寡闻,本身就是抄袭的现在你在这里告诉我你要打击盗版?!完全就是贼喊捉贼吧,哈哈哈哈哈哈这笑话我能笑一年听说现在要出电影了?和华胥引一起?希望不会撞车哈哈哈哈哈哈", "type": "qa", "source_dataset": "wg_batch2_zhihu", "title": "", "meta": {"prob": 0.8741011765270877}} + {"id": 904011, "text": "怎么样证明自己不是渣男?娶她。","meta": {"prob": 0.5239115483615828}} + ``` + - 分类器原理:基于词袋的Ridge Classfier + - 词袋One-Hot变量:文本中每个单词被表示为一个独热编码向量; + - Ridge Classfier:是一种带有L2 Norm的Logistic Classfier,通过使用 L2 正则化来约束模型回归系数大小,从而提高模型的泛化能力与稳定性的线性分类模型。 + - 注意:为保证内容过滤器的准确性,建议标注数据的规模在1W条以上 \ No newline at end of file diff --git a/flagdata/cleaner/docs/Text_Cleaner.md b/flagdata/cleaner/docs/Text_Cleaner.md new file mode 100644 index 0000000..6ffc209 --- /dev/null +++ b/flagdata/cleaner/docs/Text_Cleaner.md @@ -0,0 +1,40 @@ +# TextCleaner + +## Description + +FlagData TextCleaner offers a fast and extensible text data cleaning tool. It provides commonly used text cleaning modules. For additional text cleaning features, users can refer to the Operators section. 
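+
+As a quick orientation, the snippet below is a minimal, illustrative driver that streams records in the jsonl shape documented in the Data Format section below, applies a cleaning function, and writes each record back with an added `clean_text` field. The `clean_record` helper and both file paths are placeholders for this sketch only; they are not part of the FlagData API.
+
+```python
+import json
+
+
+def clean_record(text: str) -> str:
+    # Placeholder cleaning logic; see "Default Processors" below for the rules
+    # the shipped cleaner applies by default.
+    return " ".join(text.split())
+
+
+with open("input/text_demo_input.jsonl", encoding="utf-8") as fin, \
+        open("output/text_demo_output.jsonl", "w", encoding="utf-8") as fout:
+    for line in fin:
+        record = json.loads(line)  # one JSON object per line
+        record["clean_text"] = clean_record(record["text"])
+        fout.write(json.dumps(record, ensure_ascii=False) + "\n")
+```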
+
+## Data Format
+
+We support input data in jsonl format, where each line contains a single JSON object with key-value pairs.
+
+```
+input format:
+{
+    "id": "bc8a8f4640b153ddaddf154a605fb461",
+    "text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)?\n 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。 覆盖率高,当然好,但工程类的东西往往需要妥协和平衡,时间不够时,先测什么,后测什么,"
+}
+output format:
+{
+    "id": "bc8a8f4640b153ddaddf154a605fb461",
+    "text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)?\n 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。 覆盖率高,当然好,但工程类的东西往往需要妥协和平衡,时间不够时,先测什么,后测什么,",
+    "clean_text": "实际软件工程中是否真的需要100%代码覆盖率(code coverage)?\n 实际项目中,项目经理和架构师往往也是不错的测试员,一些严重bug,经常是他们先发现,比测试员还快一点。 项目中有很多的function, 但function之间的重要性是不同的,也就是说,是不均匀的,有的重要,有的没那么重要,同样是80%的覆盖率,一个覆盖到最重要的function,另一个没有,最后的结果也是天差地别的。 和覆盖率相比,更重要的是测试的顺序,确保最常用,最重要,最核心的功能先测试到,有bug,先发现,先解决,这样测试才高效,团队也会越测越有信心。 这也需要测试员对项目和需求有更深入的理解。"
+}
+```
+
+## Default Processors
+
+We provide several useful filters by default. For more data cleaning methods, users can refer to the Operators section to add them.
+
+The default filters perform the following cleaning procedures (a reference sketch follows the list):
+
+1. Remove documents where the proportion of line break characters exceeds 0.25 of the total number of characters.
+
+2. Remove sentences containing specific content.
+
+3. Remove documents where the ratio of numerical characters is greater than 0.5.
+
+4. Replace \xa0 and \u3000 with standard spaces and remove other invisible characters.
+
+5. Remove multiple consecutive blank lines.
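+
+For reference, the sketch below spells out these default procedures in plain Python. The function name and keyword arguments are chosen for readability in this document only and do not mirror the project's internal implementation; rule 2 is omitted because it depends on a configurable list of content patterns.
+
+```python
+import re
+from typing import Optional
+
+
+def default_clean(text: str,
+                  max_newline_ratio: float = 0.25,
+                  max_digit_ratio: float = 0.5) -> Optional[str]:
+    """Return the cleaned text, or None when the whole document should be dropped."""
+    if not text:
+        return None
+    # 1. Drop documents whose line-break proportion exceeds 0.25 of all characters.
+    if text.count("\n") / len(text) > max_newline_ratio:
+        return None
+    # 3. Drop documents whose ratio of numerical characters exceeds 0.5.
+    if sum(ch.isdigit() for ch in text) / len(text) > max_digit_ratio:
+        return None
+    # 4. Replace \xa0 and \u3000 with standard spaces and drop other invisible characters.
+    text = text.replace("\xa0", " ").replace("\u3000", " ")
+    text = "".join(ch for ch in text if ch.isprintable() or ch == "\n")
+    # 5. Collapse runs of blank lines into a single line break.
+    text = re.sub(r"\n\s*\n+", "\n", text)
+    return text.strip()
+```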
\ No newline at end of file diff --git a/flagdata/cleaner/html_cleaner.py b/flagdata/cleaner/html_cleaner.py new file mode 100644 index 0000000..81031da --- /dev/null +++ b/flagdata/cleaner/html_cleaner.py @@ -0,0 +1,39 @@ +from flagdata.cleaner.utils.common_utils import end_clip, remove_specific_patterns, remove_control_chars, \ + remove_extraspace, remove_unwanted_lines, drop_docs_exceeding_newline_proportion, drop_doc_below_ratio +from flagdata.cleaner.utils.extractor import ExtractorPipeline, ContentExtractor, TimeExtractor, TitleExtractor + + +def predict(output_file, content): + with open(output_file, 'w', encoding='utf-8') as w_f: + try: + title_extractor = TitleExtractor() + time_extractor = TimeExtractor() + content_extractor = ContentExtractor() + # 创建 ExtractorPipeline 实例并传入提取器列表 + pipeline = ExtractorPipeline(title_extractor, time_extractor, content_extractor) + extracted_items, content = pipeline.extract(content) + except Exception as e: + print(f"Error decoding JSON: {e}") + if drop_docs_exceeding_newline_proportion(content): + return + if drop_doc_below_ratio(content): + return + content = end_clip(content) + content = remove_specific_patterns(content) + + content = remove_control_chars(content) + content = remove_extraspace(content) + content = remove_unwanted_lines(content) + w_f.write(content[1:]) + + +def read_file(file_path): + with open(file_path, 'r', encoding='utf-8') as file: + return file.read() + + +if __name__ == '__main__': + output_file = 'output/html_demo_output.txt' + input_file = 'input/html_demo_input.txt' + content = read_file(input_file) + predict(output_file, content) diff --git a/flagdata/cleaner/input/arxiv_demo_input.jsonl b/flagdata/cleaner/input/arxiv_demo_input.jsonl new file mode 100644 index 0000000..c7e7398 --- /dev/null +++ b/flagdata/cleaner/input/arxiv_demo_input.jsonl @@ -0,0 +1,100 @@ +{"id": "red-arxiv-0", "source_id": "red-arxiv_0_red-arxiv-0", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\n\\section{Introduction}\nArtificial intelligence has greatly impacted the drug discovery pipeline by achieving human-like performance in the field of retrosynthesis \\cite{seglerPlanningChemicalSyntheses2018}. Retrosynthesis is the task of breaking down a chemical compound recursively into molecular precursors until a set of commercially available building block molecules is found \\cite{seglerPlanningChemicalSyntheses2018, coreyLogicChemicalSynthesis1989}. Consequently, the goal is to provide a valid synthesis route for a molecule. Potential applications of these synthesis routes are suggestions for medical chemists on how to produce a molecule of interest \\cite{coleyMachineLearningComputerAided2018}, a foundation for autonomous chemistry \\cite{coleyRoboticPlatformFlow2019}, and using synthesizability as part of De Novo Drug Design \\cite{schneiderComputerbasedNovoDesign2005}.\n\nThe field of computational retrosynthesis prediction is separated into two different tasks \\cite{schwallerMachineIntelligenceChemical2022}. In single-step retrosynthesis, the goal is to find the likely precursors or reactants for a given product. In multi-step retrosynthesis planning, the goal is to find viable synthesis paths over multiple reaction steps. \n\nThe task of single-step retrosynthesis prediction is treated as a supervised learning task, commonly categorized as template-based or template-free \\cite{dongDeepLearningRetrosynthesis2021}. 
Template-based approaches use manually curated or data-driven reaction templates \\cite{thakkarArtificialIntelligenceAutomation2021}. These templates represent the general atoms and bond structures required to perform a reaction. Therefore the objective is to predict the most appropriate template to break down the molecule \\cite{seidlImprovingFewZeroShot2022, chenDeepRetrosyntheticReaction2021a}. Template-free approaches are treated as a sequence prediction problem predicting one token of a chemical SMILES vocabulary at a time \\cite{tetkoStateoftheartAugmentedNLP2020, irwinChemformerPretrainedTransformer2022}, drawing inspiration from natural language processing \\cite{vaswaniAttentionAllYou2017}. Recently variations of these two approaches have emerged. In semi-template-based \\cite{sachaMoleculeEditGraph2021a, wangRetroPrimeDiversePlausible2021a}, a molecule is first broken down into subparts, and then the subparts are rebuilt into chemically viable reactants. Lastly, though many models leverage the sequence-based SMILES notation, there are also attempts to utilize graph-based descriptors across these approaches, exploiting the advantages of a molecular graph \\cite{chenDeepRetrosyntheticReaction2021a, tuPermutationInvariantGraphtoSequence2022}.\n\nIn comparison to single-step retrosynthesis, multi-step retrosynthesis planning focuses on researching novel route search algorithms using a fixed single-step model to identify retrosynthetic disconnections. The pioneering work in the field uses neural-guided \\gls{MCTS} and a template-based approach to find synthesis routes \\cite{seglerPlanningChemicalSyntheses2018}. Instead of assessing the state value in the search tree at run-time, alternative methods use oracle functions to guide the tree search. These methods include \\gls{DFPN} search with edge cost, which combines classical \\gls{DFPN} with a neural heuristic \\cite{kishimotoDepthfirstProofnumberSearch2019}, and Retro*, which combines the A* path finding algorithm with a neural heuristic \\cite{chenRetroLearningRetrosynthetic2020}. Newer approaches use a template-free model, either combining neural-guided \\gls{MCTS} with reaction feasibility heuristics \\cite{linAutomaticRetrosyntheticRoute2020} or directly using synthesizability heuristics combined with a forward synthesis model \\cite{schwallerPredictingRetrosyntheticPathways2020}. Instead of using heuristics, self-play \\cite{silverMasteringGameGo2017}, learning a value function by letting an algorithm play the game of synthesis against itself, is an additional investigated approach \\cite{schreckLearningRetrosyntheticPlanning2019b, hongRetrosyntheticPlanningExperienceGuided2021, kimSelfImprovedRetrosyntheticPlanning2021}.\n\nMulti-step approaches repeatedly apply the chemical information stored in single-step retrosynthesis models. However, the relationship between single-step models and multi-step approaches is not reflected in contemporary research, treating both tasks as distinct entities. Even though multi-step algorithms require the use of single-step models, these single-step models are generally fixed. Similarly, single-step models are developed without evaluating their use in multi-step approaches. Therefore, an open question is how single-step retrosynthesis evaluation metrics translate to the multi-step domain \\cite{schwallerMachineIntelligenceChemical2022} and, consequently, how single-step models affect the synthesis route finding capabilities as part of a multi-step algorithm. 
In this work, we establish a bridge between single and multi-step retrosynthesis tasks by benchmarking the performance and transfer of different single-step retrosynthesis models to the multi-step domain. We show the impressive impact of the single-step model on multi-step performance but, more importantly, a disconnection between contemporary single-step and multi-step evaluation metrics.\n\n\\section{Methods}\nWe select three state-of-the-art retrosynthesis single-step models to compare their performance in the multi-step domain. The model selection is based on dominant contemporary neural network approaches, i.e., contrastive learning, sequence-to-sequence, and graph-based encoding, considering their respective top-1 to top-50 performance on the USPTO-50k single-step retrosynthesis benchmark \\cite{loweExtractionChemicalStructures2012, schneiderWhatWhatNearly2016}. Accordingly, the selected models are MHNreact \\cite{seidlImprovingFewZeroShot2022}, a contrastive learning approach, Chemformer \\cite{irwinChemformerPretrainedTransformer2022}, a sequence-to-sequence approach, and LocalRetro \\cite{chenDeepRetrosyntheticReaction2021a}, a graph-based approach. As an additional baseline, a template-based multi-layer perceptron approach \\cite{thakkarDatasetsTheirInfluence2020, genhedenAiZynthFinderFastRobust2020}, drawing inspiration from \\cite{seglerPlanningChemicalSyntheses2018}, is included since it is often used in multi-step retrosynthesis algorithms \\cite{seglerPlanningChemicalSyntheses2018, kishimotoDepthfirstProofnumberSearch2019, chenRetroLearningRetrosynthetic2020, kimSelfImprovedRetrosyntheticPlanning2021}. Given that we aim to evaluate the capacity of these single-step retrosynthesis models in multi-step retrosynthesis planning, we use the model hyperparameters suggested in their respective publications (Appendix \\ref{tab:ssm_hyperparam}) assuming the models are optimized for the single-step prediction task. The only exception is Chemformer, where we use beam size 50 to produce the single-step results and beam size 10, the publication default, for multi-step retrosynthesis planning.\n\nTo ensure the correct implementation of the single-step models and compare their single-step performance, we perform a 10-fold cross-validation by splitting the data into 80\\% training, 10\\% validation, and 10\\% test splits for each fold. Each model is trained using the train split, training is monitored using the validation split, and the test split is used for final evaluation. All models use the same data split for each fold, and the data is preprocessed according to the specifications of each model.\n\nEach single-step model is evaluated by measuring its accuracy and inference time. Single-step accuracy \\cite{dongDeepLearningRetrosynthesis2021} is defined as the percentage of target compounds for which the model finds the ground-truth reactants within the top-k, $k \\in \\{1,3,5,10,50\\}$, measuring the ability of the model to capture chemical reaction information. Inference time is defined as the time needed to produce retrosynthesis predictions for a set of molecules, measuring the ability of the model to provide predictions in a timely manner. In an ablation study, we measure the impact of the amount of evaluation data and batch size on the inference time. For the first, we measure the influence of doubling the evaluation data while using the default batch size (Appendix \\ref{tab:batch_sizes}), analyzing the scalability of the model. 
For the second, we measure the impact of setting the batch size to 1, replicating the necessary conditions for a multi-step search algorithm that can only explore one molecule per instance (e.g. \\cite{seglerPlanningChemicalSyntheses2018}).\n\nThe selected multi-step algorithms to evaluate the performance of the different single-step models are \\gls{MCTS} \\cite{seglerPlanningChemicalSyntheses2018}, which dynamically assesses the state values of the search tree at run-time, and Retro* \\cite{chenRetroLearningRetrosynthetic2020}, which instead uses an A* path finding algorithm in combination with an oracle function. \nIn the case of Retro*, we refrain from using the oracle function and rely only on the priors of the single-step model for initial cost estimation, given that the original oracle function is generally shown to have little impact \\cite{trippReEvaluatingChemicalSynthesis2022} and is trained on USPTO data, which could cause information leakage. We defer from using a self-play algorithm since it would be necessary to retrain the self-play algorithm per problem instance, i.e., the set of used building blocks.\n\nIn the first and second experiments, the search settings for \\gls{MCTS} and Retro* are set to a time limit of 1800 seconds (30 minutes) and 200 algorithm iterations per molecule (Appendix \\ref{tab:ms_hyperparam}), respectively. In a third experiment, the search settings for Retro* are set to a time limit of 28800 seconds (8 hours) to allow the single-step models to reach the maximum iteration limit (Retro*-extended) (Appendix \\ref{tab:ms_hyperparam}), given their potential slow inference times. This third experiment is only conducted with Retro* because the algorithm does not need to do multiple single-step model calls to evaluate a tree-search state. Thus the algorithm is more likely to allow the single-step model to reach the iteration limit. In this case, a single-step model call refers to the suggestion of multiple candidate reactants given a product. In all cases, we search up to a maximum route length, or tree depth, of 7 and use the Zinc stock of 17,422,831 building blocks \\cite{genhedenAiZynthFinderFastRobust2020}. All experiments are conducted by extending the open-source \\gls{AZF} multi-step retrosynthesis framework \\cite{genhedenAiZynthFinderFastRobust2020} to use alternative single-step models instead of the thus far implemented baseline template-based model.\n\nTo evaluate the performance of a single-step model within a multi-step setting, we measure the solvability of molecules when searching for a synthesis plan. Solvability is the percentage of test molecules for which a specific combination of search algorithm and single-step model can produce solved routes. A route is considered solved when all predicted leaf compounds are available within the building block stock. To further investigate the performance of single-step retrosynthesis models, we also analyze the average number of iterations carried out by the search algorithm, the average number of calls to the single-step model, and the average search time, calculated across the test compounds.\n\nThe data used for all experiments is USPTO-50k \\cite{schneiderWhatWhatNearly2016, loweExtractionChemicalStructures2012}, a commonly used dataset within the single-step retrosynthesis field. The dataset consists of 50,016 unique products and their respective reactants, where the randomly split dataset contains 40,012 training reactions, 5,002 validation reactions, and 5,002 test reactions. 
The multi-step evaluation is conducted on the products of the test set.\n\nSingle-step retrosynthesis models are trained and benchmarked on one Tesla V100 32GB GPU. In comparison, multi-step retrosynthesis experiments are evaluated using a high-performance CPU cluster to facilitate the parallelization necessary to evaluate an extensive set of molecules in an appropriate time frame.\n\\section{Results}\n\\subsection{Single-step retrosynthesis prediction}\nWe reproduce the performance of the selected single-step models using a 10-fold cross-validation with USPTO-50k. By averaging across the folds, we reproduce the results reported in Chemformer \\cite{irwinChemformerPretrainedTransformer2022}, MHNreact \\cite{seidlImprovingFewZeroShot2022}, and LocalRetro \\cite{chenDeepRetrosyntheticReaction2021a} (Fig. \\ref{fig:accuracy} and Appendix \\ref{tab:accuracy}). For all models the data split has no discernable effect on the accuracy, shown by the small standard deviation across all folds. Additionally, we calculate the performance of the baseline model implemented in \\gls{AZF} \\cite{genhedenAiZynthFinderFastRobust2020} as a benchmark to compare the single-step models. \nFor top-1 accuracy, Chemformer outperforms the other models, with an average accuracy of 54.7\\% (± 1.1\\%). LocalRetro and MHNreact follow this with 52.5\\% (± 0.7\\%) and 49.8\\% (± 0.8\\%) accuracy, respectively, and \\gls{AZF} performs notably worse with an accuracy of 43.3\\% (± 1.0\\%). This pattern, however, is not maintained across the top-k measures. Accuracy noticeably ascends within the top-3 for all models, LocalRetro seeing a +24.1\\% increase in accuracy to 76.6\\% (± 0.6\\%), similar to MHNreact with a +23.0\\% increase to 72.8\\% (± 1.0\\%). \\gls{AZF} has a +16.8\\% increase in accuracy to 60.0\\% (± 1.0\\%), and Chemformer has the smallest gain in accuracy with an +11.2\\% increase to 65.9\\% (± 1.0\\%). Within the top-50 predictions, LocalRetro shows 96.6\\% (± 0.3\\%) accuracy, followed by MHNreact with 93.3\\% (± 0.4\\%) accuracy, both showing similar profiles across the top-k. \\gls{AZF} notably increases its performance across top-3 to top-10, giving 78.1\\% (± 0.7\\%) accuracy at top-50. Surprisingly, Chemformer delivers the lowest accuracy in the top-50 of the models tested, with an accuracy of 73.3\\% (± 0.3\\%). Though Chemformer outperforms other models in top-1, it is less able to find the ground-truth reactants for the remaining products despite additional explored alternatives with higher top-k.\n\n\\begin{figure}[t]\n \\centering\n \\includegraphics[width=0.615\\textwidth]{accuracy.png}\n \\caption{Percentage of compounds for which single-step retrosynthesis models found the ground-truth reactants within the top-k (Accuracy) on USPTO-50k averaged across 10-fold cross-validation. The standard deviation over all folds is indicated by the colored error bands.}\n \\label{fig:accuracy}\n\\end{figure}\n\\begin{figure}[b]\n \\centering\n \\includegraphics[width=0.6\\textwidth]{times.png}\n \\caption{Influence of data and batch size (Appendix \\ref{tab:batch_sizes}) on inference time per molecule on USPTO-50k averaged across 10-fold cross-validation.}\n \\label{fig:times}\n\\end{figure}\nThe influence of increased data and decreased batch size on the single-step model inference time is examined since single-step models typically evaluate in batches (Fig. \\ref{fig:times}). Generally, there is a linear relationship when doubling the amount of inferred data. 
We observe that the inference time per molecule remains stable when the amount of test data increases, except for LocalRetro, which triples the inference time per molecule. \nIn contrast, by decreasing batch size to one, we emulate the conditions of the model call within multi-step retrosynthesis planning. Chemformer and MHNreact both substantially increase their average inference time per molecule. For Chemformer, the increase in inference time is discernible, reaching eight times compared to the default batch size. MHNreact has the most marked increase in inference time, with the change from batch size 32 to 1 leading to 18x longer inference time per molecule. On the other hand, the inference time per molecule of LocalRetro is hardly affected by this change.\n\n\\subsection{Multi-step retrosynthesis planning}\nIntroducing the single-step models into the selected search algorithms, \\gls{MCTS} generally performs worse than Retro* (Table \\ref{tab:ms}). In detail, Retro* performs better in terms of solvability, number of explored routes, and solved routes per molecule across nearly all tested single-step models. The only exception is Chemformer, which produces more solved routes per molecule (\\gls{MCTS}: 3.33, Retro*: 2.06) while using fewer model calls (\\gls{MCTS}: 8.48, Retro*: 14.4). However, Chemformer with \\gls{MCTS} still has a lower overall solvability (\\gls{MCTS}: 44.3\\%, Retro*: 53.4\\%). In essence, it produces multiple solved routes for a smaller subset of solved molecules. Retro*-extended reaches or improves the result of Retro*, given that single-step models have more time for inference. In detail, Chemformer and MHNreact achieve higher performance using Retro*-extended, leveraging more single-step model calls. In comparison, the baseline \\gls{AZF} model and LocalRetro do not utilize the added time with more single-step model calls as they already reach the 200 iteration limit within the 30 min time limit of Retro*, thus having similar performance using both settings.\n\nFor the overall best-performing search setting, Retro*-extended, the single-step model ranking in terms of solvability is LocalRetro (80.6\\%), Chemformer (65.6\\%), MHNreact (60.9\\%), and \\gls{AZF} (50.6\\%). However, high solvability does not always imply a high number of solved routes. For example, Chemformer has a higher solvability than MHNreact, yet produces a considerably lower number of solved routes per molecule (Chemformer: 8.04, MHNreact: 56.6). Moreover, a high number of explored routes is also not directly connected to a high solvability. For example, MHNreact explores the highest number of routes per molecule but performs only the third best in solvability since it solves comparably few explored routes.\n\nLastly, there are large disparities across the average search time per molecule. Chemformer is by far the slowest model (18737 sec), followed by MHNreact (8016 sec), both of which are considerably slower compared to LocalRetro (322 sec) and \\gls{AZF} (129 sec). 
Even with these extensive search times, Chemformer and MHNreact do not reach the same level of single-step model calls as LocalRetro and \\gls{AZF}.\n\nGenerally, LocalRetro outperforms other models in terms of solvability and number of solved routes while producing slightly fewer total explored routes than MHNreact and needing approximately 2.5x the time per molecule in comparison to the fastest \\gls{AZF} baseline.\n\n\\newcommand{\\ra}[1]{\\renewcommand{\\arraystretch}{#1}}\n\\begin{table*}[hbt]\n\\centering\n\\caption{\\label{tab:ms}Comparison of multi-step algorithm and single-step retrosynthesis model combinations on USPTO-50k test set (5,002 molecules). Bold numbers indicate the best performance across all experiments.\n}\n\\begin{tabular}{@{}crrcrrrr@{}}\\toprule\n& & \\multicolumn{1}{c}{Overall} & \\phantom{}& \\multicolumn{4}{c}{Average per Molecule} \\\\\n\\cmidrule{3-3} \\cmidrule{5-8}\n\\textbf{Algorithm} & \\textbf{Model} & \\begin{tabular}[c]{@{}l@{}} \\textbf{Solvability (\\%)} \\end{tabular} && \\begin{tabular}[c]{@{}l@{}}\\textbf{Explored} \\\\ \\textbf{Routes} \\end{tabular} & \\begin{tabular}[c]{@{}l@{}}\\textbf{Solved} \\\\ \\textbf{Routes}\\end{tabular} & \\begin{tabular}[c]{@{}l@{}}\\textbf{Search} \\\\ \\textbf{Time (s)}\\end{tabular} & \\begin{tabular}[c]{@{}l@{}}\\textbf{Model} \\\\ \\textbf{Calls} \\end{tabular} \\\\ \\midrule\n\\multirow{4}{*}{\\begin{tabular}[c]{@{}l@{}}\\gls{MCTS}\\end{tabular}} & \\gls{AZF} & 49.5 && 367 & 24.9 & 165 & 783 \\\\\n& Chemformer & 44.3 && 4.40 & 3.33 & 2475 & 8.48\\\\\n& LocalRetro & 71.5 && 86.7 & 27.4 & 1616 & 412\\\\\n& MHNreact & 44.4 && 7.11 & 2.50 & 1842 & 29.5\\\\ \\midrule\n\\multirow{4}{*}{\\begin{tabular}[c]{@{}l@{}}Retro*\\end{tabular}} & \\gls{AZF} & 50.6 && 2574 & 48.6 & 130 & 195\\\\\n& Chemformer & 53.4 && 39.7 & 2.06 & 1518 & 14.4\\\\ \n& LocalRetro & \\textbf{80.6} && 7792 & 149 & 335 & 193\\\\\n& MHNreact & 55.2 && 2818 & 17.6 & 1653 & 38.8\\\\ \\midrule\n\\multirow{4}{*}{\\begin{tabular}[c]{@{}l@{}}Retro*-extended\\end{tabular}} & \\gls{AZF} & 50.6 && 2567 & 48.5 & \\textbf{129} & 195\\\\\n& Chemformer & 65.6 && 224 & 8.04 & 18738 & 134\\\\\n& LocalRetro & \\textbf{80.6} && 7786 & \\textbf{151} & 322 & 193\\\\\n& MHNreact & 60.9 && \\textbf{8176} & 56.5 & 8016 & 180\\\\\n\\bottomrule\n\\end{tabular}\n\\end{table*}\n\n\\section{Discussion}\n\nWe show that a single-step retrosynthesis model can tremendously impact multi-step retrosynthesis planning, influencing the ability to solve products and successfully produce multiple solutions. Across all three experiments (\\gls{MCTS}, Retro*, Retro*-extended), the alternative single-step models mostly outperform the baseline \\gls{AZF} model. In \\gls{MCTS}, one single-step model, LocalRetro, shows a considerably higher solvability than \\gls{AZF}. In the case of Retro* and Retro*-extended, all models outperform \\gls{AZF}, particularly when given an extended time to carry out sufficient model calls. The generally best performing model is LocalRetro, which has outstanding solvability and provides the most solved routes per molecule across all multi-step retrosynthesis planning experiments, continually outperforming all other methods. We show that the exchange of the single-step model alone can improve solvability by +30.0\\%, reaching 80.6\\%, and triple the number of solved routes per molecule to 151. 
As such, the single-step model should be well considered when developing multi-step retrosynthesis planning approaches.\n\nGiven our results, no clear pattern supports the usage of single-step top-k metrics as a potential proxy measure for solvability in multi-step retrosynthesis. For single-step models, the accuracy ranking varies from top-1 to top-50 (Fig. \\ref{fig:accuracy} and Tab. \\ref{tab:accuracy}). However, these rankings, and their intermediates, are never matched by their respective multi-step solvability rankings (Tab. \\ref{tab:ms}). Hence, multi-step solvability does not solely depend on a singular single-step factor and should not be reduced to a singular top-k single-step metric.\n\nExclusively focusing on the top-1 accuracy of single-step models is especially problematic when transferring the model to a multi-step domain. Though Chemformer shows the highest top-1 accuracy, in multi-step experiments, it finds a comparatively low number of solved routes per molecule despite having the second highest solvability. A cause for this could be its single-step accuracy profile, going from the best performing to the worst performing model as top-k increases. This suggests that the model is proficient at predicting certain reactions but cannot find a diverse set of solutions. However, diverse solutions are beneficial for a tree-search setting, where up to 50 possible explorable route alternatives could be added per search iteration.\n\nImportantly, similar top-k accuracy profiles do not result in the same multi-step results. For example, MHNreact and LocalRetro have a similar top-k accuracy profile in single-step retrosynthesis but differ greatly in multi-step retrosynthesis planning. Though they explore a similar number of routes, MHNreact solves considerably fewer routes. This difference in performance may be explained by comparing Retro* and Retro*-extended, where MHNreact improves only slightly in solvability despite having considerably more time. Though MHNreact explores and solves many more routes in general, the difference in solvability shows that it can only do this for molecules it had already solved in the shorter time frame.\n\nSince multi-step solvability is not solely dependent on the top-k accuracy shown by the single-step models, other factors may contribute to their performance. For example, given that search algorithms generally have a limited run-time, single-step model inference times can greatly affect performance in multi-step retrosynthesis planning. To produce solved routes, single-step models must carry out as many single-step model calls as possible within an allocated time limit. If the inference time is too long, then the number of model calls will be limited, and as such, the number of explored routes will also be limited. This effect is evident in MHNreact and Chemformer, the models with the highest search times and lowest model calls across all experiments.\n\nSingle-step models typically evaluate in batches larger than one, however this does not currently transfer to multi-step retrosynthesis planning. For example, MHNreact has the fastest inference time when using its default batch size, but its inference time is considerably increased when reducing the batch size to one, the setting under which multi-step retrosynthesis planning is carried out. As such, single-step models may not reach their full potential in search algorithms due to slow inference times. 
\n\nNoteworthy, most single-step models are developed for GPU use, and CPU usage can hinder their inference speed. However, it is necessary to conduct the multi-step experiments in parallel on CPUs due to the thousands of target molecules to be solved since massive GPU parallelization is currently not available to the general research community. Consequently, models designed and optimized for the single-step prediction task may not perform well in multi-step retrosynthesis planning. Therefore, single-step model developers should take the potential multi-step application into account and optimize their methods accordingly.\n\nThere are more general aspects to consider when discussing the divide between single-step and multi-step retrosynthesis. Though USPTO-50k is the most commonly used dataset for benchmarking single-step retrosynthesis prediction models, it represents only a limited area of the chemical space such that the models and our results may not apply to a more expansive chemical domain. Moreover, USPTO-50k comprises only single-step reactions, and the produced routes cannot be compared to reference routes to evaluate their validity. Recently, new benchmarks have emerged to address the lack of multi-step reference data \\cite{genhedenPaRoutesFrameworkBenchmarking2022a}, so further work is required to quantify the produced routes. Ideally, one would assess these routes irrespective of a particular reference route since there are many potential valid routes for any target molecule. However, at present, this can only be addressed by a domain expert, an extremely time-intensive task. \n\nAdditionally, this work focuses on using single-step models within two selected search algorithms. However, other search approaches can also considerably impact multi-step retrosynthesis planning methods \\cite{hongRetrosyntheticPlanningExperienceGuided2021, kimSelfImprovedRetrosyntheticPlanning2021}. Thus finding the optimal combination of single-step and multi-step methods is yet to be explored and could have a substantial impact on synthesis planning in the future.\n\n\\section{Conclusion}\n\nIn this work, we bridge the gap between single-step retrosynthesis and multi-step retrosynthesis planning. By extending current state-of-the-art single-step models to the multi-step domain, we find no clear relationship between the single-step and multi-step benchmarking metrics, in particular single-step top-k accuracy and multi-step solvability. Additionally, we show the importance of developing single-step models for the multi-step domain, as single-step models can have an impressive impact on multi-step retrosynthesis planning performance. LocalRetro, the best performing single-step model, increases solvability by +30.0\\% to 80.6\\% and triples the number of solved routes compared to the most widely used model. Interestingly, LocalRetro outperforms other single-step models, even those with similar single-step accuracy profiles. Additionally, we analyze other potential factors involved in the translation between the two domains, most notably the inference time of the single-step model. Overall, we show there is no easy transfer of single-step retrosynthesis models to the multi-step retrosynthesis planning domain.\n\nWith this work, we provide an overview of how current state-of-the-art single-step models fare within contemporary search algorithms, however, we only evaluate a selected scope of single-step and multi-step combinations. 
In the future, more diverse chemical datasets need to be further explored to examine the applicability of these approaches beyond the USPTO-50k dataset.\n\nTo summarize, we show that single-step models should be developed and tested for the multi-step domain, and not as an isolated task, to successfully identify synthesis routes for molecules of interest. \n\n\\section*{Acknowledgements}\nThis study was partially funded by the European Union’s Horizon 2020 research and innovation program under the Marie Skłodowska-Curie Innovative Training Network European Industrial Doctorate grant agreement No. 956832 “Advanced machine learning for Innovative Drug Discovery”. Parts of this work were performed using the ALICE compute resources provided by Leiden University. We thank Dr. Anthe Janssen (Leiden Institute of Chemistry) for providing chemical feedback.\n\n\\bibliographystyle{IEEEtran}\n"} +{"id": "red-arxiv-1", "source_id": "red-arxiv_1_red-arxiv-1", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\subsection{Configuration Space, Time Alignment, and Dynamic Probability Surface}\n\\paragraph{Configuration space.}\nWe study the configuration space $\\mathbb{M}_s$ of a molecule. \nA molecule may take a particular configuration ${\\utwi{x}} = (x_1,\\, x_2, \\cdots, x_d) \\in \\mathbb{M}_s$.\nThe configuration of alanine dipeptide is fully characterized by $d=60$ features~\\cite{Li_Ma2016TPS}. \n $\\mathbb{M}_s$ therefore lies in a subspace of the Euclidean space $\\mathbb{R}^d$: $\\mathbb{M}_s \\subset \\mathbb{R}^d$, $d=60$. \n\n\n\n\n\\paragraph{Dynamic probability surface.}\nWe take the time $t$ as the $d+1$ dimension, and examine the space-time relationship of the configuration of the molecule.\nAt time $t$, each configuration $({\\utwi{x}};\\; t) = (x_1,\\, x_2, \\cdots, x_d;\\; t) $ lies in the time-configuration space $\\mathbb{M}_t \\subset \\mathbb{R}^{d+1}$, \nand \nhas a probability $f({\\utwi{x}};\\; t) \\in [0,1]$. Here the function\n$\nf: \\mathbb{M}_t \\rightarrow \\mathbb{R}_{[0,\\,1]}\n$\n assigns the probability value $f({\\utwi{x}}; t)$ to a specific time-configuration $({\\utwi{x}};\\; t)$. We study the topological structure of the dynamic probability surface $f({\\utwi{x}}; t)$ over $\\mathbb{M}_t$, with time as the $(d+1)$-th dimension.\n\n\\paragraph{Time Alignment.}\nThe dynamic probability surface is constructed from sampled molecular dynamics trajectories.\nAll trajectories start from reactant basin and end in the product basin. Each trajectory is time-stamped by the duration from the start of the simulation, which is termed the {\\it absolute time}. Isomerization occurs in individual trajectory at different absolute time (Fig.~\\ref{fig:TPSIllustration}A).\nSince the relevant event is the transition of isomerization, \nwe adjust the time with an offset so the transition time occurs at $t=0$ for each trajectory (Fig.~\\ref{fig:TPSIllustration}B). Based on the 1-dimensional reaction coordinate computed following~\\cite{wu2022rigorous}, we take the first time that the trajectory reaches the transition state as $t=0$.\nWhile it is not possible to conduct committor test for each of the $3\\times 10^6$ trajectories to determine the transition time, the occurrence of the isomerization is fully captured by the reaction coordinates. 
Hence, we take the transition time $t=0$ as the time when the predicted committor value $p_B$ is 0.5 using the 1-D reaction coordinate described in the work done by Wu et al.~\\cite{wu2022rigorous}, where the authors used a generalized work functional to summarize the mechanical effects of the couplings between different coordinates. Singular value decomposition (SVD) was then employed to extract the inherent structure of the generalized work function, from which the 1-dimensional reaction coordinate was approximated with high accuracy. This enabled identifications of the transition-state configurations where $p_B=0.5$ rapidly. More details can be found in~\\cite{wu2022rigorous}.\n\n\\subsection{Flux and Its Rotation} \\label{sec:flux}\n\n\n\\paragraph{Flux over the configuration space.} \nTo characterize molecular movement in the configuration space,\nwe study its dynamic fluxes.\nAt time $t$, a molecule takes the configuration ${\\utwi{x}}(t) \\in \\mathbb{M}_s \\subset \\mathbb{R}^d$ and has a velocity ${\\utwi{u}}(t) \\in \\mathbb{R}^d$.\n\n\nWe first take the Lagrangian view, namely, the viewpoint of trajectories, where we start to track the molecule at \nabsolute\ntime $t'=0$ along the trajectory currently located at ${\\utwi{x}}(0) \\equiv {\\utwi{x}}(t'=0)$ and float with this trajectory over time. \nThe flux of this trajectory $f({\\utwi{x}}(0),t')$ at time $t'$ is then:\n\\begin{equation}\n{\\utwi{J}}({\\utwi{x}}(0),\\, t') =\\rho\\cdot{\\utwi{u}}({\\utwi{x}}(0),\\, t'),\n\\end{equation}\nwhere $\\rho$ is the weight of the trajectory and ${\\utwi{u}}({\\utwi{x}}(0),\\, t')$ is the velocity of trajectory at time $t'$.\n\n\nWe then take the Eulerian view and consider the fluxes associated with molecules located at specified locations. \nWe consider a small fixed volume $\\Delta \\Omega \\subset \\mathbb{M}_s$ in the configuration space and measure the flux inside $\\Delta \\Omega$ at time $t$ \nafter time alignment.\nWe do so by taking trajectories that are traveling inside $\\Delta \\Omega$ at time $t$.\nThe total flux in $\\Delta \\Omega$ at time $t$ \nis then:\n\\begin{equation}\n{\\utwi{J}}_{\\Delta \\Omega}(t) \n= \\int_{{\\utwi{x}}(t) \\in \\Delta \\Omega} {\\utwi{J}}(t) d {\\utwi{x}} = \\int_{{\\utwi{x}}(t) \\in \\Delta \\Omega} \\rho\\cdot{\\utwi{u}}({\\utwi{x}}(0),\\,t) d {\\utwi{x}} ,\n\\end{equation}\nwhere ${\\utwi{x}}(t)$ is the location of the current flux line originate from ${\\utwi{x}}(0)$, which has a velocity of ${\\utwi{u}}(t)$ at time $t$ after alignment (Fig.~\\ref{fig:TPSIllustration}C).\n\n\nWe estimate the fluxes from trajectories sampled by molecular dynamics. 
In this study, all trajectories are\nproperly generated without bias and therefore of equal and constant weight proportional to $\\rho$.\nThis is, as the MD trajectories are sampled without bias, \n$\\rho$ is the same for all trajectories and is a constant \nover time.\nThe flux of the $i$-th trajectory at time $t$ is therefore:\n\\begin{equation}\n{\\utwi{J}}_i(t) =\\rho\\cdot{\\utwi{u}}_i(t),\n\\end{equation}\nand the flux ${\\utwi{J}}_{\\Delta\\Omega}(t)$ through a small volume $\\Delta \\Omega$ at time $t$ is:\n\\begin{equation}\n{\\utwi{J}}_{\\Delta\\Omega}(t)\n= \\sum_{{\\utwi{x}}_i(t) \\in \\Delta \\Omega} {\\utwi{J}}_i(t) \n= \\sum_{{\\utwi{x}}_i(t) \\in \\Delta \\Omega} \\rho\\cdot{\\utwi{u}}_i(t).\n\\label{eqn:flux-disc}\n\\end{equation}\nHere we set $\\rho = 1/N$, where $N$ is the total number of unbiased trajectories.\n\n\n\n\n\\paragraph{Rotation of the flux.}\nWe further study the rotation of the flux. Our goal is to accurately characterize the activation dynamics during the barrier crossing process. Here we introduce a rigorous concept of rotational flux based on differential form~\\cite{bachman2012geometric} and describe a method for its computation. To illustrate, let us examine a toy system of a velocity field over a 2-dimensional configuration space, where the velocity at each point ${\\utwi{x}}=$($x_1,x_2$) is ${\\utwi{u}}({\\utwi{x}})=(u_{x_1},u_{x_2})= (-x_2, +x_1$). This velocity field exhibits a constant counter clockwise rotation around the origin %\n(Fig.~\\ref{fig:CurlIllustration}). The rotation of the velocity field on the $x_1$--$x_2$ plane is calculated by the difference of the changes of $u_{x_2}$ in the $x_1$ direction $\\Delta u_{x_2}/\\Delta x_1$ (blue, Fig.~\\ref{fig:CurlIllustration}, right) \nwhere $\\Delta u_{x_2} >0$ and $\\Delta x_1 >0$, \nand changes in $u_{x_1}$ in the $x_2$ direction $\\Delta u_{x_1}/\\Delta x_2$ (red), where\n$\\Delta u_{x_1} <0$ and $\\Delta x_2 >0$. \nSpecifically, the rotation can be written as ($\\Delta u_{x_2}/\\Delta x_1 - \\Delta u_{x_1}/\\Delta x_2$). \n\n\\begin{figure}[!htbp]\n \\begin{center}\n \\includegraphics[width=0.99\\textwidth]{plots/illustration_Curl_2.png}\n \\end{center}\n %\n \\caption{\\sf Rotational flux in a 2-dimensional velocity vector field, where $u_{x_1}=-x_2$ and $u_{x_2}=+x_1$. The velocity field circles around the origin $(x_1,\\, x_2)=(0,0)$ in the counter clockwise fashion. The rotation value is the signed sum of changes of $u_{x_2}$ in the direction of $x_1$, and $u_{x_1}$ in the direction of $x_2$.} \\label{fig:CurlIllustration}\n\\end{figure}\n\n\nFor flux in high-dimensional space, we generalize the concept of rotation using differential form~\\cite{bachman2012geometric}. \nThe flux ${\\utwi{J}}_{\\Delta\\Omega}(t)$ inside the small volume $\\Delta \\Omega$ is represented by a $d$-dimensional vector \n${\\utwi{J}}_{\\Delta\\Omega}(t)=(J_{\\Delta\\Omega,\\,1}(t), \\cdots, J_{\\Delta\\Omega,\\,d}(t)) \\in \\mathbb{R}^d$. 
\nThis flux vector can be written as a $d$-dimensional $1$-form:\n\\begin{equation}\n{\\utwi{J}}_{\\Delta\\Omega}(t)=J_{\\Delta\\Omega,\\,1}(t) \\cdot dx_1+ \\cdots + J_{\\Delta\\Omega,\\,d}(t) \\cdot dx_d,\n\\end{equation}\nwhere $J_{\\Delta\\Omega,\\, i}(t) $ is the component of the flux in the $i$-th dimension.\nThe differential of this 1-form can be written as \n$$d{\\utwi{J}}_{\\Delta\\Omega}(t)=\n\\sum_{\\substack{i\\neq j \\\\i,j \\in \\{0,\\cdots, d\\}}}\n(\\frac{\\partial J_{\\Delta\\Omega,\\,j}(t)}{\\partial x_i} - \\frac{\\partial J_{\\Delta\\Omega,\\, i}(t)}{\\partial x_j}) \n\\; dx_i \\wedge dx_j$$ \nwhere $\\wedge$ is the wedge operator denoting the exterior product~\\cite{bachman2012geometric}. \nWith the above definitions, the rotation of the $d$-dimensional flux vector can be written as \n\\begin{equation}\n\\nabla \\times J(\\Omega,t)=<\\frac{\\partial J_{\\Delta\\Omega,j}(t)}{\\partial x_i} - \\frac{\\partial J_{\\Delta\\Omega,i}(t)}{\\partial x_j}>, \\quad i\\neq j \\,\\quad{\\rm{ and }}\\, \\quad i,j \\in \\{0,\\cdots, d\\}\n\\label{eqn:rot}\n\\end{equation}\nwhere \n$({\\partial J_{\\Delta\\Omega,j}(t)}/{\\partial x_i} - {\\partial J_{\\Delta\\Omega,i}(t)}/{\\partial x_j})$\nrepresents the counter clockwise rotation of the flux projected onto the $i$-$j$ plane, as shown in Fig.~\\ref{fig:CurlIllustration}.\n\n\n\n\n\n\n\n\n\\subsection{Topology of dynamic probability surfaces} \n\n\\paragraph{Homology groups and persistent homology.} In this study, we investigate global features of the occurrence of probability peaks in the\ntime-configuration space. \nOur approach is that of homology group~\\cite{munkres2018elements,hatcher2005algebraic} and persistent homology~\\cite{Edelsbrunner:2230405,Edelsbrunner2002, carlsson2009topology}. \nBelow we give a brief overview, as more detailed descriptions of this choice over that of critical points is described in ref~\\cite{Manuchehrfar_Activated}.\n\nHomology groups characterize holes of various dimension. \nPersistent homology quantifies the prominence of these holes. Here we focus on the isolated probability peaks, which are components or 0-dimensional holes when they are isolated, and the configurations where they reside on. \nAs an illustration, we envision that the probability landscape over the configuration space is flooded under the sea level. At the beginning, all mountain peaks on the probability landscape are below the sea level~(Supplementary movie 1). The sea level is then lowered gradually, with some peaks emerging above the sea. As the sea level further recedes, isolated mountain peaks may become connected by land-ridges.\nDepending on how much of the time-space configurations have probability above a given level, different peaks of probability over the configuration space may emerge. As the level is lowered, regions with probability greater than the given level enlarges. As a result, previously isolated peaks may become land-connected and become merged into one connected component.\n\n\n\\begin{figure}[!htbp]\n \\begin{center}\n \\includegraphics[width=0.99\\textwidth]{plots/Illustration_Filtration_2.png}\n \\end{center}\n \\caption{\\sf The probability landscape $f(\\mathbb{M})_t$ and the topology of its superlevel set $\\mathbb{M}_{t,\\,f\\ge}$. \n %\n (\\textbf{A}) The probability landscape and different sea levels. 
The superlevel set $\\mathbb{M}_{t,\\,f\\ge .}$ are regions in $\\mathbb{M}_t\\subset \\mathbb{R}^{(d+1)}$ whole probability height value is above the sealevel $(.)$.\n %\n (\\textbf{B}) At $f({\\utwi{x}})=a$, the whole probability landscape is below the sea level and $\\mathbb{M}_{t,\\,f\\ge a}=\\emptyset$. \n %\n At $f({\\utwi{x}})=b_1,\\, b_2,\\, b_3,\\, d_3$, and $d_2$, the topology of the superlevel set changes.\n At each of $b_1$, $b_2$, and $b_3$, a new peak shown as a white island emerges. At $d_3$ and $d_3$, two separate peaks become merged together. \n %\n (\\textbf{C}) The persistent diagram of the birth and death probabilities of the peaks. The sea levels of $b_1$, $b_2$, and $b_3$ are birth probabilities, and the sea levels of $d_3$ and $d_2$ are death probabilities.} \\label{fig:seaLevels}\n\\end{figure}\n\n\\paragraph{Superlevel sets and sublevel sets.}\nFormally, we can identify all $({\\utwi{x}};\\; t) \\in \\mathbb{M}_t$ with probability values \n$f({\\utwi{x}};\\; t)\\ge a$. They form the\n\\textit{superlevel set} $\\mathbb{M}_{t,\\; f\\ge a}$:\n$$\n\\mathbb{M}_{t,\\; f\\ge a} \\equiv \\{ ({\\utwi{x}};\\;t) \\in \\mathbb{M}_t| f({\\utwi{x}};\\;t) \\ge a\\} = f^{-1}([a,\\,1)).$$\nThe \\textit{sublevel sets} $\\mathbb{M}_{t,\\; f\\le a}$ is defined similarly: \n$$\n\\mathbb{M}_{t,\\; f\\le a} \\equiv \\{ ({\\utwi{x}};\\;t) \\in \\mathbb{M}_t| f({\\utwi{x}}) \\le a\\} = f^{-1}((0,\\,a]).\n$$\n\n\n\\paragraph{Time-space configurations as cubic complexes.}\nWe represent the $(d+1)$-dimensional time-space configuration space \n$\\mathbb{M}_t$ using cubic complexes~\\cite{kaczynski2006computational}. \nA $d$-dimensional cubic complex $K$ is the union of points, line segments, squares, cubes, and their $k$-dimensional counterparts glued together properly, where $k \\le d$ and all are of unit length, except points, which have no lengths.\n\n\\paragraph{Filtration.}\nWe examine the topological structures of probability peaks on the time-configuration space, and restrict ourselves to those whose probabilities are above certain level. By gradually adjusting this level, we can follow the details of topological changes.\nAs illustrated in Fig~\\ref{fig:seaLevels}A-\\ref{fig:seaLevels}B, the probability sea level at $f({\\utwi{x}}) = a$ covers the whole probability landscape. The domain of the portion of the landscape above the sea level does not exist and is therefore the empty set $\\emptyset$.\nWe gradually lower the sea level to value $b_1$, when the first peak emerges from the sea.\nThis sea level gives the birth of the first peak. \nAt this time, we have the superlevel set $\\mathbb{M}_{t,\\; f\\ge b_1}=\\{{\\utwi{x}} \\in \\mathbb{M}_t| f({\\utwi{x}}) \\ge b_1 \\}$, which are the set of configurations whose probability is \n$\\ge~b_1$.\nThey form the small white region shown in Fig~\\ref{fig:seaLevels}B (left, middle). \nWe further lower the sea level to $b_2$ when \nthe region associated with the first peak expands, and\nanother peak emerges above the sea. This is at the birth of the second peak (Fig~\\ref{fig:seaLevels}B, left, bottom). At this time, we have the superlevel set $\\mathbb{M}_{t,\\;f\\ge b_2}$. We then continue lowering the sea level to $b_3$, where a third peak emerges (Fig~\\ref{fig:seaLevels}B, right, top). We have $\\mathbb{M}_{t,\\;f\\ge b_3}$ at this level. \nWe continue this process until sea level reaches $d_3$, where the first and the third peaks are merged together by a land ridge that has just emerged above the sea level. 
This is at the probability value of the death location of the third peak (Fig~\\ref{fig:seaLevels}B, right, middle). At this sea level, we have $\\mathbb{M}_{t,\\;f\\ge d_3}$. We further decrease the sea level until we reach the sea level of $d_2$, when the second peak becomes merged with the other two peaks (Fig~\\ref{fig:seaLevels}B, right, bottom). At this level, we have $\\mathbb{M}_{t,\\;f\\ge d_2}$. \nAt each of these levels, the topology of the superlevel set changes, namely, we have in sequence one component, two components, three components, two components, and then one component again. These changes are captured by the changing 0-th homology groups or the Betti numbers we compute (see ref~\\cite{Cohen-Steiner-stability,Manuchehrfar_Activated} for more details).\n\nFormally, we have a descending sequence of probability values corresponding to the lowering sea level:\n$$\n1=a_0>a_1 > a_2 > \\cdots > a_n = 0,\n$$\nand the corresponding superlevel sets, or the domains of the part of the landscape above the sea level, which are subspaces of $\\mathbb{M}_t$:\n$$\n\\emptyset =\\mathbb{M}_{t,\\;0 }\n\\subset \\mathbb{M}_{t,\\;1} \n\\subset \\mathbb{M}_{t,\\;2} \n\\cdots\n\\subset \\mathbb{M}_{t,\\;n} = \\mathbb{M}_t. \n$$\nAs the full time-configuration space $\\mathbb{M}_t$ is represented by a cubic complex $K$, each superlevel set $\\mathbb{M}_{t,\\;i}$ is represented by a subcomplex $K_i \\subset K$, which can be derived from the original full complex $K$. The corresponding sequence of subcomplexes are:\n$$\n\\emptyset = K_0\n\\subset K_1 \\subset K_2 \\cdots \\subset K_n = K.\n$$\nThis sequence of subcomplexes forms a \\textit{filtration}. \n\n\\paragraph{Persistence and persistent diagram.}\nUpon changing the sea level so the corresponding subcomplex changes from $K_{i-1}$ to $K_i$, we may gain a new peak,\nor we may lose one when a peak is merged with another one. A peak (or a connected component) is \\textit{born} at $a_i$ if it is present in $K_i$ but absent in $K_{i-1}$ for any value of $a_{i-1} < a_i$. The peak \n\\textit{dies} at $a_i$ if it \nis present in $K_{i-1}$ but not at $a_i$ for any value of $a_{i-1} < a_i$. \nWe record the location and the value of $a_i$, namely, the corresponding $k$-cube and its probability value whose inclusion lead to the birth or the death event. \n \nThe prominence of the topological feature of a peak is encoded in its life-time or \\textit{persistence}. Denote the birth value and the death value of peak $i$ as $b_i$ and $d_i$, respectively.\nThe \\textit{persistence} of peak $i$ is $b_i-d_i$.\nIn the example shown in Fig.~\\ref{fig:seaLevels}C, the component associated with the first, second and third peak is born at $f({\\utwi{x}})=b_1$, $f({\\utwi{x}})=b_2$, and $f({\\utwi{x}})=b_3$, respectively.\nAt $f({\\utwi{x}})=d_2$, the first and the second components are merged together. That is, the second peak dies at $d_2$, and the persistence of this peak is $b_2-d_2$. At $f({\\utwi{x}})=d_3$, the first and the third component are merged together, and the third peak dies at $d_3$. The persistence of the third peak is therefore $b_3-d_3$. The first peak dies at $f({\\utwi{x}})=0$, and its persistence is $b_1-0=b_1$.\n\nWe record the birth and death events of the peaks in a two-dimensional plot, or the \\textit{persistent diagram}~\\cite{Cohen-Steiner-stability}. Each peak is represented by a point in this diagram, where the birth value $b_i$ and the death value $d_i$ are taken as its coordinates ($b_i,\\, d_i$). 
Fig.~\\ref{fig:seaLevels}C shows the persistent diagram of our illustrative example.\n\n\n\n\\paragraph{Computation.}\nWe use the cubical complexes described in ref~\\cite{wagner2012efficient} to calculate the persistent homology of the high dimensional time-evolving dynamic probability surface. The algorithm keeps track of changes in the super level set $\\mathbb{M}_{t,\\; f\\ge a}$ of the probability surface, and considers the birth and death of probability peaks.\n We neglect other topological properties such as 1-cycles. The locations ${\\utwi{x}}_s$ where birth and death events occur, namely, the corresponding $k$-cubes are also computed. Details and code are available at ``{https://github.com/fmanuc2/0-Homology-Group.git}''.\n \n\n\n\n\n\\section {Results}\n\n\\subsection{Model System and Computations.} \n\\paragraph{Molecular Dynamics Simulation.}\nSimulations were performed using the molecular dynamics software suite GROMACS-$4.5.4$~\\cite{Berk2008}, with implementation of transition path sampling reported in ref.~\\cite{Li_Ma2016TPS}.\nAmber94 force field was used in our simulations~\\cite{cornell1996second}. The structure of the alanine dipeptide was energy minimized using the\nsteepest descent algorithm and heated to $300$K using velocity rescaling, with a coupling constant of\n$0.3$ ps. The system was then equilibrated for $200$ ps. No constraints were applied. The time step\nof integration was $1$ fs. We then performed $2$ ns NVE simulation, such that we are able to harvest one\nreactive trajectory. The reactant basin $C_{7eq}$ was defined as $-3.49<\\phi<-0.96$ and $-1.57<\\psi<3.32$, and the product basin $C_{7ax}$ was defined as $0.87<\\phi<1.74$ and $-1.39<\\psi<0$~\\cite{Li_Ma2016_Reaction_Mechanism}. Given this initial reactive path, $3 \\times 10^6$ reactive trajectories were harvested through\ntransition path sampling.\nSpecifically, we randomly select one time point in the original reactive trajectory, exert a small\nperturbation to the momentum, then initiate simulation from this point both forward and backward\nin time. \nSimulations are performed with constant total energy of $36$KJ/mol, such that the average\ntemperature is 300 K in the transition path ensemble.\nThis is repeated until a new reactive trajectory is harvested~\\cite{Li_Ma2016TPS}. Each reactive trajectory is $2.5$ ps long, with the time step of $1$ fs. We then collect the configuration (conformation and velocity) at every step along each trajectory. All together, we have $7.5\\times 10^{9}$ conformations.\n\n\n\n\\paragraph{Constructing Time-Evolving Dynamic Probability Surface.} \nWe align the trajectories by the time of the occurrence of the transition, with the time $t$ at transition set to $t=0$. Conformations at the transition state have the appropriate values of the one-dimensional reaction coordinate as described in~\\cite{wu2021mechanism}. After alignment, we examine the time-interval of transition from $-2.5$ ps to $+2.5$ ps. \nWe construct the time-evolving dynamic probability surface \n$\\{ p({\\utwi{x}},\\, t) | ({\\utwi{x}},\\, t) \\in \\mathbb{M}_t\\}$ \nusing the $7.5\\times 10^{9}$ aligned and time-stamped conformations. \nBased on the analysis of reaction coordinates using the energy flow theory~\\cite{Li_Ma2016_Reaction_Mechanism}, we select the top-ranked $5$ dihedral angles ($\\phi$, $\\psi$, $\\theta_1$, $\\alpha$, $\\beta$) from the original $60$ spatial dimensions as the coordinates of $\\mathbb{M}_s$. 
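As a rough sketch of this construction step, the surface is a normalized multi-dimensional histogram of the aligned, time-stamped conformations. The Python snippet below assumes a hypothetical array \texttt{frames} of shape $(N,\,6)$ whose first column is the aligned time in ps and whose remaining columns are the five selected dihedral angles in radians, with the binning choices described in the next paragraph; the angle range is our assumption.

\begin{verbatim}
import numpy as np

def build_probability_surface(frames, n_time_bins=500, n_angle_bins=15):
    """Normalized histogram p(x, t) over the binned time-configuration space.

    frames: hypothetical (N, 6) array; column 0 is the aligned time in ps,
    columns 1-5 are phi, psi, theta1, alpha, beta in radians.
    Note: the full dense grid (500 x 15^5 bins) is large; a sparse
    representation may be preferable in practice.
    """
    edges = [np.linspace(-2.5, 2.5, n_time_bins + 1)]            # transition window in ps
    edges += [np.linspace(-np.pi, np.pi, n_angle_bins + 1)] * 5  # assumed angle range
    counts, _ = np.histogramdd(frames, bins=edges)
    return counts / counts.sum()
\end{verbatim}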
Along with time $t$, we have a $6$-dimensional probability surface $\{ p({\utwi{x}},\, t) | ({\utwi{x}},\, t) \in \mathbb{M}_t\}$.
Each angle coordinate of ($\phi$, $\psi$, $\theta_1$, $\alpha$, $\beta$),
in units of radians,
is divided into 15 bins, and the time interval is divided into 500 bins, each of $10$ fs.
This discretization leads to $15^5\times 500= 379,687,500$ 6-dimensional hypercubes, where time is one of the dimensions.


\paragraph{Computing Topological Structure of the Dynamic Probability Surface.} Persistent homology is computed using a $20$-core Xeon E5-2670 CPU at 2.5 GHz, with a cache size of 20 MB and 128 GB of RAM. The computation time for finding the prominent peaks and the ridges connecting them is about $10$ min.

\subsection{High Probability Reactive Region in Space-Time from Topology of Dynamic Probability Surface} \label{Surf_topo}

\paragraph{High probability reactive region dominates in the configuration-time space during the transition.}
In a previous study, we showed that without time separation, the transition state conformations among the aggregation of
$7.5\times 10^{9}$ conformations during the period of $-2.5$ to $+2.5$ ps are
concentrated in a small reactive region of $\phi \times \theta_1 = [-0.2,\, +0.2] \times [-0.1,\, +0.1]$ (see Fig~\ref{fig:topo_TimeAnd3}C for the $\phi$ and $\theta_1$ angles).
The remaining 54 peaks are near either the reactant basins or the product basins, reflecting minor fluctuations within these stable regions.\n\n\n\\begin{figure}[!htbp]\n \\includegraphics[width=0.98\\linewidth]{plots/peaks_time_2.png}\n \n \\caption{\\sf The dynamic probability landscape and its topological structure.\n (\\textbf{A}) 3-d conformation of the alanine dipeptide before and after isomerization. \n (\\textbf{B}) The persistence diagram of probability peaks over the (time, $\\phi, \\psi, \\theta_1, \\alpha, \\beta$)-space, where the birth and death probability of each peak is shown.\n (\\textbf{C}) The 6-d landscape projected onto the $\\phi$--$\\theta_1$ plane. Here colored dots are the locations of probability peaks occurring at different time. \n The contour plot in the background depict the sea level of time-aggregated probability projected onto this 2-d plane, where brown and cyan color indicates high and low probability, respectively.\n %\n (\\textbf{D}) The landscape projected onto the $\\phi$--$\\psi$ plane. \n %\n (\\textbf{E}) The $\\phi$ coordinate of the probability peaks as time proceeds. $\\phi$ fluctuates in the reactant basin before transition occurs. During the transition period ($t=0$), $\\phi$ increases and subsequently reaches to the value of the product basin. \n %\n (\\textbf{F}) The $\\psi$ coordinate of the probability peaks as time proceeds. There is significant fluctuation in $\\psi$ before transition.\n %\n (\\textbf{G}) The $\\theta_1$ coordinate of the probability peaks as time proceeds. $\\theta_1$ fluctuates throughout the whole time.\n %\n As there are only a finite number of trajectories, we coarse grained each coordinate into 15 bins. As probability peaks in 6-D\n space are shown on 2D-planes, separate probability peaks in space or time may appear at the same coarse-grained locations in these 2D angle plots. The location of each peak, and its birth probability is shown in Supporting information table S1.\n }\n \\label{fig:topo_TimeAnd3}\n\\end{figure}\n\n\\paragraph{Probability peaks in the reactive region at the transition time.}\nThe locations of the probability peaks in\n $\\phi$, $\\psi$, and $\\theta_1$ \n against time $t$ are shown in\n Fig.~\\ref{fig:topo_TimeAnd3}E--\\ref{fig:topo_TimeAnd3}G. \nIn the $\\phi$ angle, minor probability peaks fluctuate around the reactant basin ($\\phi \\approx -2.0$) rad \nbefore reaching the transition state (Fig~\\ref{fig:topo_TimeAnd3}E).\nAt $<-0.5$ ps prior to the transition state, $\\phi$ increases rapidly to the value of the product basin\n($\\phi\\approx1.0$) rad, and fluctuates modestly afterwards. At $t=0$, the highest probability peak (red dot) occurs at $\\phi=0$. \nThe $\\psi$ angle fluctuates drastically around the reactant basin ($\\psi \\approx 0$) prior to the transition state (Fig~\\ref{fig:topo_TimeAnd3}F). \nNear the transition $t=0$, $\\psi$ decreases gradually to the value of product basin\n($\\psi\\approx 0.5$) rad and become more stabilized after the isomerization. \nIn the reaction coordinate $\\theta_1$, the probability peaks fluctuates significantly in a consistent manner throughout the entire $5$ ps, exhibiting an overall oscillating behavior (Fig.~\\ref{fig:topo_TimeAnd3}G).\n\n \n\nThe probability peaks are all small before the transition (blue), reflecting the fact that the molecular conformations prior to isomerization are diverse. 
\nAt the transition time $t=0$, a probability peak is located in the small region of $\\phi \\times \\theta_1 \\times \\psi = [-0.2,\\, +0.2]\\times[-0.1,\\, +0.1]\\times[-0.2,\\, +0.2]$ (Fig.~\\ref{fig:topo_TimeAnd3}E-\\ref{fig:topo_TimeAnd3}G, red). \nThis reflects the fact that most conformations on route to isomerization pass through a small reactive region in the configuration space. After the transition, probability peaks again become small (blue), reflecting the diverse conformations near the product basin. \n\n\nOverall, these results show that the transition state at $t=0$ has the highest probability peak, which is preceded and followed by smaller meta peaks (two before and seven after $t=0$), all within $\\approx \\pm0.4$ ps of the transition time. \nAt the reactant and product basins, molecular conformations are diverse, with a number of small probability peaks.\nAs the probability peak increases then decreases in height, there is \n consistent fluctuation in the $\\theta_1$ angle , while \nmoderate fluctuations occur before transition in $\\phi$ and in $\\psi$.\n\n\\paragraph{Relation between free energy surface and the dynamic probability surface.}\n\nThe potential energy surface of the alanine dipeptide isomerization in vacuum is as previously described in Ref.~\\cite{Bolhuis2000}. There are two prominent minima on the potential energy surface, associated with the reactant basin and the product basin. Their locations are identical to the locations of the reactant and the product basin on the dynamic probability surface reported in~\\cite{Manuchehrfar_Activated}. We have also determined the location of minima on the free energy surface, which are derived \nfrom a longer MD trajectory of $\\approx 15$ ns. The $3.0 \\times 10^7$ conformations taken from each $0.5$ fs intervals of the trajectory are harvested, from which the free energy surface is approximated~(Fig.~\\ref{fig:ActualPotential}A and \\ref{fig:ActualPotential}B).\nThe topological structure of the free energy surface is summarized in the persistent diagram of Fig.~\\ref{fig:ActualPotential}C. There are two prominent minima on the free energy surface, or equivalently, two high probability peaks on the probability surface, when examined over the 3-dimensional $\\phi$--$\\psi$--$\\theta_1$ space \n(red dots on Fig.~\\ref{fig:ActualPotential}A and \\ref{fig:ActualPotential}B). One is associated with the reactant basin (($\\phi$, $\\psi$, $\\theta_1$) $=$ $(1.25,\\,-0.84,\\,0.01)$ (labeled 1),\nand the other with the product basin $(-1.68,\\,0.42,\\,-0.18)$ (labeled 2). These locations \nare identical to the locations of the reactant and the product basins on the dynamics probability surface of reactive trajectories as reported in~\\cite{Manuchehrfar_Activated}. \n %\n Furthermore, \n there exists a probability peak located at the active region of ($\\phi$, $\\psi$, $\\theta_1$) $=$ $(0.00,\\,-0.42,\\,0.00)$ on the\ndynamic probability surfaces, regardless whether it is time-separated as discussed earlier or \nover the whole $2.5$ ps period~\\cite{Manuchehrfar_Activated}.\nHowever, there is no corresponding minimum on the free energy surface at this location.\n\n\\begin{figure}[!htbp]\n \\includegraphics[width=0.98\\linewidth]{plots/ActualPotentialSurface.png}\n \n \\caption{Free energy surface approximated from a long MD trajectory of $15$ ns plotted on {\\bf (A)} the $\\phi$--$\\theta_1$ plane and the {\\bf (B)} $\\phi$--$\\psi$ plane. 
Its persistent diagram {\\bf (C)} shows that there are two prominent minima on the free energy surface, or peaks on the probability landscape, which are associated with the reactant basin (labeled 1) and the product basin (labeled 2).}\n \n \\label{fig:ActualPotential}\n\\end{figure}\n\n\n\\subsection{Reactive Vortex Regions of \nHigh Probability Exhibit Strong\nNon-Diffusive Rotational Flux}\n\n\n\\paragraph{Flux and projection to the $\\phi$--$\\theta_1$ and $\\phi$--$\\psi$ planes.} \nWe study dynamic fluxes of molecular movement, which is calculated using Eqn~(\\ref{eqn:flux-disc}).\nWe first study the projection of the flux lines \nto the $\\phi$--$\\theta_1$ and $\\phi$--$\\psi$ planes (Fig~\\ref{fig:flux_TimeAnd3} and Supplementary movies~2-3), and examine how they are related to topological changes in the probability peaks \non the 6-d space of (time$, \\, \\phi,\\, \\psi, \\, \\theta_1, \\, \\alpha,\\, \\beta$).\nFor illustration, we take 3 time points before ($t= -700$ fs), at ($t= 0$ fs), and after ($t= +770$ fs) the transition. \n\n\\begin{figure}[!htbp]\n \\includegraphics[width=0.98\\linewidth]{plots/Flux_phi_psi_theta1_2.png}\n \n \\caption{\\sf Dynamic fluxes projected on the $\\phi$--$\\theta_1$ and the $\\phi$--$\\psi$ planes at three different times of before ($t=-700$ fs), at ($t=0$ fs), and after ($t=+770$ fs) the transition. The strongest portions of the flux lines are in red. Red dots are locations of probability peaks at the current time, and blue dots are the \n %\n location of peaks after $20$ fs.}\n \\label{fig:flux_TimeAnd3}\n\\end{figure}\n\nAt $t=-700$ fs, flux is present in the cubic region of $\\theta_1 \\in [-1.0, \\, +1.0]$, \\, $\\phi \\in [-3.0, \\, 0.5]$, and $\\psi \\in [-3.0, \\, +3.0 \\, ]$. \nUpon projection onto the $\\phi$--$\\theta_1$ plane, strong and uneven fluxes are located in a smaller rectangle of $\\theta_1 \\in [-0.2, \\, +0.2]$ and $\\phi \\in [-1.5, \\, -0.5]$ (green rectangular region in \nFig.~\\ref{fig:flux_TimeAnd3}A and red flux lines in Supplementary movie 2).\nThis is the same location where probability peak at $t=-700$ fs (Fig~\\ref{fig:topo_TimeAnd3}D and in Fig~\\ref{fig:topo_TimeAnd3}A). \n When projected to the $\\phi$--$\\psi$ plane, the flux is weak and even-valued (Fig.~\\ref{fig:flux_TimeAnd3}D and Supplementary movie 3).\n\n\nAt the transition time $t=0$, flux lines are the strongest on both the\n$\\phi-\\theta_1$ and the $\\phi-\\psi$ plane. They are in the direction of increasing $\\phi$ and decreasing $\\theta_1$, and slightly decreasing $\\psi$ (Fig.~\\ref{fig:flux_TimeAnd3}B, \\ref{fig:flux_TimeAnd3}E, and Supplementary movies~2-3). \nThis is the direction pointing from the reactant basin to the product basin.\nThe probability peak at $t=0$ is located at the center of the flux lines (red dots in Fig.~\\ref{fig:flux_TimeAnd3}B and~\\ref{fig:flux_TimeAnd3}E).\n\n\nAt $t=770$ fs after the transition, dynamic flux is found in the cubic region of $\\phi \\in [-0.5, \\, 2.0]$, $\\theta_1 \\in [-0.75, \\, 0.75]$, and $\\psi \\in [-3.0, \\, 1.5]$~(Fig~\\ref{fig:flux_TimeAnd3}C and ~\\ref{fig:flux_TimeAnd3}F). 
\nWhen projected onto the $\\phi$--$\\theta_1$ plane, the flux is uneven and is the strongest \naround the rectangle of $\\phi \\in [0.8, \\, 1.2]$ and $\\theta_1 \\in [-0.2, \\, 0.5]$\n(green rectangle, Fig~\\ref{fig:flux_TimeAnd3}C).\nIt is also uneven in the $\\phi$--$\\psi$ plane and is the strongest around the rectangle of $\\phi \\in [0.8, \\, 1.2]$ and $\\psi \\in [-1.5, \\, 0.0]$~(green rectangle, Fig~\\ref{fig:flux_TimeAnd3}F).\n\n\n\n\nOverall, these results show that the directional fluxes of molecular movement emerge during the transition period.\nFluxes are concentrated in the high probability reactive region in the configuration-time space\nand drive the probability peak of molecular configurations to future locations (red to blue dots, Fig.~\\ref{fig:flux_TimeAnd3}A-~\\ref{fig:flux_TimeAnd3}F).\nAt the transition time, they are the strongest and \n are in the general direction of moving molecules towards the product basin.\n \n\n\\paragraph{The reactive vortex region has strong rotational flux during transition.} \nWe further study the rotational flux of molecular movements during the transition.\nIts projections onto the $\\phi$--$\\theta_1$ and the $\\phi$--$\\psi$ planes are\n$({\\partial J_{\\Delta\\Omega,\\theta_1}(t)}/{\\partial \\phi} - {\\partial J_{\\Delta\\Omega,\\phi}(t)}/{\\partial \\theta_1})$\nand\n$({\\partial J_{\\Delta\\Omega,\\psi}(t)}/{\\partial \\phi} - {\\partial J_{\\Delta\\Omega,\\phi}(t)}/{\\partial \\psi})$, respectively~(Eqn~(\\ref{eqn:rot}))\n\nWe focus on the high probability reactive region and examine the rotational flux during the time interval of $-50$ fs and $+50$ fs in the reactive cubical region where \nmost probability mass is located~(Fig~\\ref{fig:HighResFlux_TimeAnd3}).\nWe divide the interval in each dimension of the cube $\\phi \\times \\theta_1 \\times \\psi \\in [-1.0,\\, 1.0]\n \\times[-0.5,\\, 0.5]\\times[-2.0,\\, 1.0]$ containing the reactive region into 250 bins and examine the flux and rotational flux in the $250^3=15,625,000$ cubes. \n\n\\begin{figure}[!htbp]\n \\includegraphics[width=0.98\\linewidth]{plots/High_res2_2.png}\n \\caption{\\sf The flux and its rotation during the transition at $t=-33$ fs~(A), $t= -42$ fs, $-15$ fs, $+2$ fs, and $+12$ fs~(B). \n %\n The flux lines are shown as blue lines, with the flux rotation coded by the color intensity, where darker blue represents stronger rotation. \n %\n (A) There is strong flux rotation in the plane of the two reaction coordinates of $\\theta_1$ and $\\phi$ (darker blue) at $t=-33$ fs (top), while flux rotation is negligible in the plane of $\\psi$ and $\\phi$ (bottom).\n %\n (B) Strong flux rotation presents at $t=-42$ fs, $-15$ fs, $+2$ fs, and $t= +12$ fs as the flux lines changes direction. In contrast, flux rotation remains negligible in the $\\phi$--$\\psi$ plane (supplementary movie 4), with the flux maintaining the same direction as in (A).\n Arrows in red represent the overall directions of the flux lines.\n }\n \\label{fig:HighResFlux_TimeAnd3}\n\\end{figure}\n\n\nThere are strong rotational fluxes in the $\\phi$--$\\theta_1$ plane, the two most important reaction coordinates~(Fig.~\\ref{fig:HighResFlux_TimeAnd3}A, top). \nThe flux exhibits significant changes in the direction of $\\theta_1$ as time proceeds while maintaining the same direction of increasing $\\phi$ (curved arrows, Fig.~\\ref{fig:HighResFlux_TimeAnd3}B, top, and supplementary movie 4). 
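For concreteness, the projected rotational flux defined above can be evaluated on the binned flux field with finite differences. A minimal Python sketch follows, assuming hypothetical 2-d arrays \texttt{J\_phi} and \texttt{J\_theta1} that hold the two projected flux components on the $\phi$--$\theta_1$ grid at one time slice, with bin widths \texttt{d\_phi} and \texttt{d\_theta1}; the axis convention is an assumption of the sketch.

\begin{verbatim}
import numpy as np

def rotational_flux_2d(J_phi, J_theta1, d_phi, d_theta1):
    """Curl of the projected flux field on the phi-theta1 plane:
    dJ_theta1/dphi - dJ_phi/dtheta1, via central differences.

    Assumed convention: axis 0 indexes phi bins, axis 1 indexes theta1 bins.
    """
    dJtheta1_dphi = np.gradient(J_theta1, d_phi, axis=0)
    dJphi_dtheta1 = np.gradient(J_phi, d_theta1, axis=1)
    return dJtheta1_dphi - dJphi_dtheta1
\end{verbatim}

The same expression with $\psi$ in place of $\theta_1$ gives the rotational flux projected onto the $\phi$--$\psi$ plane.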
In contrast,
flux lines in the $\phi$--$\psi$ plane move along a fixed direction of increasing $\phi$ and decreasing $\psi$. The rotational flux in this plane is negligible, even though $\psi$ is the important coordinate that, along with $\phi$, geometrically defines the reactant and product basins~(Fig.~\ref{fig:HighResFlux_TimeAnd3}A, bottom).
Overall, these results show that there are strong vortexes in the reactive region.



\paragraph{Non-diffusive rotational trajectories in reactive vortex region dominate the transition process.} Our above analysis of flux over time is at $10$ fs resolution. As trajectories of molecular movement pass through the transition state rapidly, we now examine the behavior of trajectories during the transition at a finer resolution of $1$ fs. To gain further insight into the reactive vortex region, we study the behavior of trajectories of molecular movement and measure the number of times that each trajectory rotates during the short transition time interval of $[-100,\, +100]$ fs. This is calculated by counting the number of times a trajectory re-enters the transition state region.
Here we regard the small rectangle of $(\phi, \theta_1)=[-0.2,\, 0.2]\times[-0.1,\, 0.1]$ as the reactive region, which is where the red dots in Fig.~5C and 5D are located. Results in ref~\cite{Manuchehrfar_Activated} showed
that conformations of the transition state ensemble
are indeed located in this region, as these conformations pass the rigorous committor test (dashed red rectangle, Fig~\ref{fig:rotation}A, see also the discussion related to Fig.~\ref{fig:topo_TimeAnd3}).
 %
Fig.~\ref{fig:rotation}A shows example trajectories with different numbers of entrances and re-entrances into the reactive region.

\begin{figure}[!htbp]
\centering
 \includegraphics[width=0.38\linewidth]{plots/Trajectory_rotation.png}
 \caption{\sf Rotating trajectories in the reactive vortex region.
 %
 \textbf{(A)} Examples of trajectories which enter the reactive region one, two, four, and six times. Here the reactive region, i.e., the transition state region, is indicated by the dashed red rectangle of $(\phi, \theta_1)=[-0.2,\, +0.2]\times[-0.1,\, +0.1]$ as discussed in Fig.~\ref{fig:topo_TimeAnd3}
 %
 and in~\cite{Manuchehrfar_Activated}.
 %
 Each entrance/re-entrance point of a trajectory is highlighted by a green circle.
 %
 \textbf{(B)} Distribution of the number of entrances that rotating trajectories exhibit during the transition. The majority of trajectories enter the reactive vortex region between three and five times, with a small proportion entering more than six or fewer than three times.
 %
 \textbf{(C)} Additional examples of trajectories circulating three, four, five, and six times around the reactive vortex region. Here dashed red circles highlight the circles in each trajectory.}
 \label{fig:rotation}
\end{figure}


The distribution of the number of times that trajectories enter the transition region is shown in Fig.~\ref{fig:rotation}B for a sample of 100,000 trajectories.
The majority of them re-enter the transition region 3--6 times, with four entrances occurring most frequently ($35.2\%$).
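The counting step described above reduces to detecting upward transitions of an indicator function for the reactive rectangle along each trajectory. A minimal Python sketch, with a hypothetical array \texttt{traj} of shape $(T,\,2)$ holding the $(\phi,\,\theta_1)$ values of one trajectory over the $[-100,\,+100]$ fs window:

\begin{verbatim}
import numpy as np

def count_entrances(traj, phi_box=(-0.2, 0.2), theta1_box=(-0.1, 0.1)):
    """Number of times a (phi, theta1) trajectory enters the reactive rectangle."""
    inside = ((traj[:, 0] >= phi_box[0]) & (traj[:, 0] <= phi_box[1]) &
              (traj[:, 1] >= theta1_box[0]) & (traj[:, 1] <= theta1_box[1]))
    # an entrance is a False -> True transition of the indicator;
    # starting inside the rectangle counts as the first entrance
    return int(np.count_nonzero(inside[1:] & ~inside[:-1]) + inside[0])
\end{verbatim}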
\n Trajectories entering the transition region 5, 3, and 6 times represent\n$20.0\\%$, $18.6\\%$ and $10.7\\%$ of the sampled trajectories, respectively.\nFig.~\\ref{fig:rotation}C show additional examples of\ntrajectories rotating inside the transition region moving in well-formed circles (3, 4, 5, and 6 times, respectively).\n\n\n\nThese results show that most trajectories in the reactive vortex region rotate multiple times, exhibiting strong non-diffusive rotational dynamics. There is a broad distribution in the number of re-entrance into the transition-state region, with the majority of them experiencing 3--6 rounds of rotations. \n\n\n\\paragraph{Rotational fluxes are important for barrier crossing.}\nThe flux lines in Fig~\\ref{fig:HighResFlux_TimeAnd3}B (e.g., $t=-33,\\, -42$ and $2$ fs) show that molecules generally move along in the direction that coincides with the isosurfaces of ensembles of conformations with the same committor value as described in ref~\\cite{wu2022rigorous}.\nThe rotational flux carries the molecules in the direction orthogonal to the direction of the isocommittor, indicating barrier crossing. \nAs representatives of most trajectories, the examples in Fig~\\ref{fig:rotation}A show that molecules\nmove rapidly in the general direction of isocommittor surfaces, but slowly in the orthogonal direction towards other isocommittor surfaces. The combination of these movements result in an overall spiral-like trajectories with elongated ellipse(s) drawn-out in the projection of $\\phi$--$\\theta_1$ reaction coordinates.\nDuring barrier crossing, the motion in $\\phi$ is assisted by $\\theta_1$, which transfers the potential energy it received from the thermal bath to $\\phi$ directly via kinetic energy as discussed in ref~\\cite{wu2022rigorous}. This leads to tight cooperative movements between $\\theta_1$ and $\\phi$, which is manifested as rotational flux. \n\n\n\n\n\n\\section {Discussion}\nThe transitions state theory has been the corner stone for understanding activated processes ranging from isomerization of simple organic molecules to complex protein conformational changes. Central to this theory \nare the transition state ensemble, namely, molecular conformations at the barrier top occupying the 1-degree saddle point of the free energy surface. \n\nDynamics of the transition state ensemble along the reaction coordinates is an important component of reaction rate theories. The default assumption for complex systems was based on Kramers’ physical intuition rather than systematic examination of the transition dynamics in realistic systems. As a result, the dynamics of transition of naturally occurring activated processes in complex molecules are largely unexplored. \n\n\nIn this study, we quantified the detailed topological structures of the dynamic probability surface of an activated process over the time-configuration space.\nWe use the alanine dipeptide isomerization in vacuum as our model system. The dynamic probability surface is constructed by harvesting naturally occurring trajectories of molecular movements connecting the reactant and the product basins.\nUnlike small molecules that require an external energy source, alanine dipeptide is the smallest complex system with an internal heat-bath composed of the large number of non-reaction coordinates. This heat-bath provides the necessary energy flow to facilitate the barrier-crossing process, an important property shared by proteins but absent in small molecules. 
\n\n\nOur results are based on rigorous analysis of the topological structures of the high-dimensional dynamic surfaces using persistent homology. In addition, we introduce a new method for quantifying high-dimensional flux rotations. \nThese techniques allowed us to uncover a number of important insights.\n\nFirst, the transition state ensemble of conformations are located in a reactive region in the configuration-time space and form the dominant probability peak. This finding extends earlier results of ref~\\cite{Manuchehrfar_Activated} and shows that after further separation of transition-state conformations along the time-axis, a single prominent probability peak occurring\nduring the short interval $t = [-5,\\, +5]$ fs\ndominate throughout the transition barrier-crossing process. That is, a strong reactive region with the highest probability peak exists in configuration-time, where transition state conformations as verified by the rigorous committor test accumulate are located. This region \nof short time duration dominates the whole transition process. \n\n\nSecond, there are strong directional fluxes in the high-probability reactive region. Molecules in this active region are not in equilibrium and are not diffusion-controlled. The fluxes adjust directions and become uniformly aligned at the transition time when they are the strongest, with the probability peak located at the center of flux lines. These fluxes occur primarily in the subspace of the reaction coordinates, and carry the molecular conformations forward.\n\n\nThird, the reactive region is characterized by strong vortexes. \nThere are strong rotational fluxes at the transition state, which occur in the subspace of the two most important reaction coordinates, but not in the subspace of the most important geometric coordinates. Most trajectories on route to the product basin rotate and enter the reactive vortex region multiple times. \nThese reactive trajectories move along rapidly in the direction of the surfaces of isocommittors, but slowly in the orthogonal direction to scale the barrier to the next isocommittor surface,\n drawing out spiral-like curves encircling ellipses elongated in the direction of the isocommittor surfaces. \nThe tight cooperative movements\nbetween reaction coordinate $\\theta_1$ and $\\phi$ are due to the transfer of potential energy $\\theta_1$ received from the thermal bath to $\\phi$. \nThe dynamic movements along the isocommittor surface and in the orthogonal direction of barrier-crossing are manifested as rotational fluxes in the plane of the reaction coordinates.\n\n\nOverall, our findings offers a first glimpse \ninto the reactive vortex region that characterizes the non-diffusive dynamics of barrier-crossing of a naturally occurring activation process.\nBy separating conformations along the time axis, we uncovered rich topological structures in the dynamic probability surface.\nSuch details are not possible when examining the free-energy surface and its 1-saddle point, where the dynamic aspects of the process are obscured.\n\nThe discovery of the reactive vortex region highlights the importance of analyzing the topological structures of the dynamics of the transition region in naturally occurring activated processes. With alanine dipeptide being the first system where non-diffusive behavior is established, it will be fruitful to study reactive dynamics of other naturally occurring activated processes of complex molecules. 
The results can serve as the foundation towards developing a theoretical model of transition dynamics describing activated processes occurring in nature.


While our study does not directly provide physical quantities such as rate constants that correspond to experimental measurements, it is possible in principle to analyze how fluxes cross the dividing separatrix surface and to estimate the reaction rate as described in~\cite{rosenberg1980isomerization,bose2017non,jang1992comment,zhao1993comment,nagahata2021phase}, provided one can precisely define the
separatrix surface and can accurately sample and quantify the fluxes.




\section*{Acknowledgement}
We thank Drs.\ Hubert Wagner and Herbert Edelsbrunner for their generous help in extending the cubic complex algorithm. We also thank Dr.~Wei Tian for his help.
This work is supported by grants NIH R35
GM127084 (to JL), NIH R01 GM086536 (to AM), and NSF CHE-1665104 (to AM).

\section*{Conflict of Interest Statement}

There are no conflicts of interest.


\section{Introduction}\label{sec1}

While the majority of deep neural networks are trained on GPUs, they are increasingly being deployed on edge devices, such as mobile devices. These edge devices require the architecture to be compressed for a given hardware design (e.g. GPU or lower precision chips) due to memory and power constraints \cite{benmeziane2021comprehensive, cheng2017survey}. Moreover, application-specific hardware is being designed to accommodate the deployment of deep learning models. Thus, designing deep learning architectures that are efficient for deployment (i.e. \emph{inference}) has become a new challenge in the deep learning community.

The combined problem of hardware and deep learning model design is complex, and the precise measurement of efficiency is both device and model specific. This is because researchers have to take into account various efficiency factors such as latency, memory footprint, and energy consumption. Here we deliberately oversimplify the problem in order to make it tractable, by addressing a fundamental element of hardware cost. Knowing that power consumption is directly related to the chip area in a digital circuit, we use the chip area required to implement an arithmetic operation in hardware as a surrogate to measure the efficiency of a deep learning model. While this is very coarse, and full costs will depend on other aspects of hardware implementation, it nevertheless represents a fundamental unit of cost in hardware design \cite{hennessy2011computer}.

In a deep learning model, weights are multiplied by inputs, hence one of the fundamental operations in deep learning models is multiplication $S_{{\mathrm{conv}}}(x,w) = wx$. In our work, we replace multiplication with the EuclidNet operator,
\begin{equation}\label{eq: euclid}
 S_{{\mathrm{euclid}}}(x,w) = -\frac{1}{2}\|x-w\|_2^2,
\end{equation}
which combines a difference with a square operator. We will refer to the family of deep learning models that use equation \eqref{eq: euclid} as EuclidNets.
These models compromise between standard multiplicative models and AdderNets~\cite{chen2020addernet}, which remove multiplication entirely, but at the cost of a significant loss of accuracy and a difficult training procedure.
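As a point of reference, the three scalar similarities compared in this paper can be written in a few lines of Python. This is only an illustration of the operators themselves; the actual layers apply them elementwise inside convolutions, as formalized later.

\begin{verbatim}
def s_conv(x, w):    # standard multiplicative similarity
    return x * w

def s_adder(x, w):   # AdderNet: negative l1 difference
    return -abs(x - w)

def s_euclid(x, w):  # EuclidNet: negative half squared difference
    return -0.5 * (x - w) ** 2
\end{verbatim}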
Replacing multiplication with the square operation can potentially reduce the computation cost. The feature representation of each of the architectures is illustrated in Figure~\ref{fig:feature}. EuclidNets can be implemented in 8-bit precision without loss of accuracy, as demonstrated in Table~\ref{tab: quant}.

 The square operator is cheaper than multiplication, and it can also be implemented using lookup tables \cite{de2009large}. In \cite{baluja2018no,covell2019table}, the authors show that lookup tables can replace actual floating-point computation, while works such as LookNN \cite{razlighi2017looknn} take the first step in designing hardware for lookup table use. On low-precision hardware, we can compute $S_{\mathrm{euclid}}$ for about half the cost of computing $S_{\mathrm{conv}}$.

 Furthermore, using EuclidNets, the deep learning model does not lose expressivity, as explained in Section \ref{sec:theory}.
To summarize, we make the following contributions:
\begin{itemize}
 \item We design a deep learning architecture based on replacing the multiplication $S_{\mathrm{conv}}(x,w) = wx$ by the squared difference of equation \eqref{eq: euclid}. We show that using the square operator can potentially reduce the hardware cost.
 \item These deep learning models are just as expressive as convolutional neural networks. In practice, they have comparable accuracy (a drop of less than 1 percent on ImageNet with ResNet-50 going from a full precision convolutional network to an 8-bit EuclidNet).
 \item We show theoretically and empirically that EuclidNets behave the same as convolutional neural networks when the input is transformed (e.g. by a linear transformation) or affected by noise (e.g. Gaussian noise).
 \item We provide an easy approach to train EuclidNets using homotopy.
\end{itemize}

\begin{figure}
 \centering
 \includegraphics[width=0.32\textwidth]{conv.png}
 \includegraphics[width=0.32\textwidth]{adder.png}
 \includegraphics[width=0.32\textwidth]{euclid.png}
 \caption{Feature representation of traditional convolution with $S(x,w) = xw$ (left), AdderNet $S(x,w) = -\|x-w\|_1$ (middle), and EuclidNet $S(x,w) = -\frac{1}{2}\|x-w\|_2^2$ (right).}
 \label{fig:feature}
\end{figure}


\begin{table}[h]
	\caption{EuclidNet accuracy with full precision and 8-bit quantization: results on ResNet-20 with Euclidean similarity for CIFAR10 and CIFAR100, and results on ResNet-18 for ImageNet. EuclidNet achieves comparable or better accuracy with 8-bit precision, compared to the conventional full precision convolutional neural network.
\n\t}\\label{tab: quant}\n\t\\centering\n\n\\begin{tabular}{ccc ccc}\n \n\t\n\t\\multirow{3}{*}{\\textbf{Network}} &\n\t\\multirow{3}{*}{\\textbf{Quantization}} &\n\t\\multirow{3}{*}{\\textbf{Chip Efficiency}} &\n\t\\multicolumn{3}{c}{\\textbf{Top-1 accuracy}} \\\\ \n\t&&& CIFAR10 & CIFAR100& ImageNet \\\\\n\t\\hline\n\n\t\\multirow{2}{*}{$S_{{\\mathrm{conv}}}$} &\n\tFull precision &\t\\xmark &\t92.9\n\t& 68.14 & 69.56 \\\\%69.8 \\\\ \n \t\t&\t8-bit &\t\\cmark &\t92.07 & 68.02 &\t69.59 \\\\\n\t\t\\multirow{2}{*}{$S_{{\\mathrm{euclid}}}$} &\n\tFull precision & \\xmark & 93.32 & 68.84 & 69.69 \\\\ \n\t\t&\t8-bit &\t\\cmark &\t93.30 & 68.78 & 68.59 \\\\\n\t\t\\multirow{2}{*}{$S_{{\\mathrm{adder}}}$} &\n\tFull precision & \\xmark & 91.84 & 67.60 & 67.0 \\\\ \n\t\t&\t8-bit &\t\\cmark &\t91.78 & 67.60 & 68.8 \\\\\n\t\t\t\\multirow{1}{*}{BNN} &\n\t1-bit & \\cmark & 84.87 & 54.14 & 51.2 \\\\\n\\end{tabular}\n\n\n\\end{table}\n\n \\section{Context and related work}\n \n \nCompressing deep learning models comes at the costs of accuracy loss, and increasing training time (to a greater extent on quantized networks) \\cite{frankle2018lottery, cheng2018model}. \nPart of the accuracy loss comes simply from decreasing model size, which is required for mobile and edge devices \\cite{wu2019machine}. Some of the most common deep learning compression methods include pruning \\cite{reed1993pruning}, quantization \\cite{guo2018survey}, knowledge distillation \\cite{hinton2015distilling}, and efficient design \\cite{iandola2016squeezenet,howard2017mobilenets,zhang2018shufflenet,tan2019efficientnet}. Between the compression methods, the most prominent approach is low bit quantization \\cite{guo2018survey}. In this case, the inference can speed up with lowering bit size, at the cost of \n accuracy drop and longer training times. In the extreme quantization, such as binary networks, operations have negligible cost at inference but exhibits a considerable accuracy drop \\cite{Hubara_BNN}.\n\nHere we focus on a small sub-field of compression, that optimizes mathematical operations in a deep learning model. This approach can be combined successfully with other conventional compression methods, such as quantization \\cite{xu2020kernel} and pruning \\cite{reed1993pruning}.\n\n \n On the other hand, knowledge distillation \\cite{hinton2015distilling} consists of transferring information from a larger teacher network to a smaller student network. The idea is easily extended by thinking of information transfer between different similarity measures, which \\cite{xu2020kernel} explores in the context of AdderNets. Knowledge distillation is an uncommon training procedure and requires extra implementation effort. However, EuclidNet preserves the accuracy without knowledge distillation. We suggest a straightforward training using a smooth transition between common convolution and Euclid operation using homotopy.\n\n \n\n\\section{Similarity and Distances}\n\n\\subsection{Inner Products versus Distances}\n\nConsider an intermediate layer of a deep learning model with input $x\\in{\\mathbb{R}}^{H\\times W \\times c_{{\\mathrm{in}}}}$ and output $y~\\in~{\\mathbb{R}}^{H\\times W \\times c_{{\\mathrm{out}}}}$ where $H,W$ are the dimensions of the input feature, and $c_{{\\mathrm{in}}}, c_{{\\mathrm{out}}}$ the number of input and output channels, respectively. 
For a standard convolutional network, we represent the input to output transformation via weights $w~\\in~{\\mathbb{R}}^{d\\times d\\times c_{{\\mathrm{in}}}\\times c_{{\\mathrm{out}}}}$ as\n\\begin{equation}\\label{eq: layer}\ny_{mnl} = \\sum_{i = m}^{m+d} \\sum_{j=n}^{n+d} \\sum_{k = 0}^{c_{{\\mathrm{in}}}} \nx_{ijk} w_{ijkl}\n\\end{equation}\nSetting $d=1$ reduces the equation \\eqref{eq: layer} to a fully-connected layer.\nWe can abstract the multiplication of the weights $w_{ijkl}$ by $x_{ijkl}$ in the equation above by using a similarity measure $S:{\\mathbb{R}}\\times{\\mathbb{R}}\\to{\\mathbb{R}}$. The original convolutional layer corresponds to \n$$\nS_{{\\mathrm{conv}}}(x,w) = xw.\n$$\nIn our work, we replace $S_{\\mathrm{conv}}$ with $S_{\\mathrm{euclid}}$, given by equation \\eqref{eq: euclid}. A number of works have also replaced the multiplication operator in deep learning models. \nThe most relevant work is the AdderNet \\cite{chen2020addernet}, which uses \n\\begin{equation}\\label{eq: adder}\nS_{{\\mathrm{adder}}}(x,w) = -\\|x-w\\|_1.\n\\end{equation}\nto replace multiplication by $\\ell_1$ norm, i.e. summation of the absolute value of differences. \nThis operation can be implemented very efficiently on a custom hardware, knowing that subtraction and absolute value of different $n$-bit integers cost $\\mathcal{O}(n)$ gate operations, compared to $\\mathcal{O}(n^2)$ for multiplication i.e. $S_{\\mathrm{conv}}(x,w) = xw$. However, AdderNet comes with a significant loss in accuracy, and is difficult to train. \n\n\\subsection{Other Similarity Measures}\n\n\nThe idea of replacing multiplication operations to save resources within the context of neural networks dates back to 1990s. Equally motivated by computational speed-up and hardware logic minimization, authors of \\cite{dogaru1999comparative} defined perceptrons that use the synapse similarity,\n\\begin{equation}\\label{eq: comp_syn}\nS_{{\\mathrm{synapse}}}(x,w) = \\sign(x)\\cdot \\sign(w) \\cdot \\min(\\|x\\|,\\|w\\|),\n\\end{equation}\nwhich is cheaper than multiplication in terms of hardware complexity.\n\nEquation \\eqref{eq: comp_syn} has not been experimented with modern deep learning models and datasets. Moreover, in \\cite{akbas2015multiplication} a slight variation is introduced which is also a multiplication-free operator,\n\\begin{equation}\\label{eq: mf}\nS_{{\\mathrm{mfo}}}(x,w) = \\sign(x)\\cdot\\sign(w)\\cdot(\\|x\\|+\\|w\\|)).\n\\end{equation}\nNote that both equations \\eqref{eq: comp_syn} and \\eqref{eq: mf} use $\\ell_1$-norm. Also note that in \\cite{mallah2018multiplication}, the updated design choice allows contributions from both operands $x$ and $w$. Furthermore, in \\cite{afrasiyabi2018non}, the similarity in image classification on CIFAR10 is studied. Other applications of equation \\eqref{eq: mf} are studied in \\cite{badawi2017multiplication, pan2019additive}. \n\n\n\nIn \\cite{you2020shiftaddnet}, the similarity operation is further combined with a bit-shift, leading to an improved accuracy with negligible added hardware cost. However, the accuracy results for AdderNet appear to be lower than those reported in \\cite{chen2020addernet}. Another follow-up work uses knowledge distillation to further improve the accuracy of AdderNets \\citep{xu2020kernel}. 
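To make the abstraction of equation \eqref{eq: layer} with a pluggable similarity $S$ concrete, the short Python sketch below evaluates a single output position naively. It is a reference illustration only, not an efficient layer; array shapes follow the notation above, and the valid-boundary handling and $d\times d$ window convention are our assumptions.

\begin{verbatim}
import numpy as np

def layer_output_at(x, w, m, n, l, S):
    """Naive evaluation of the layer output at position (m, n, l).

    x: input of shape (H, W, c_in); w: weights of shape (d, d, c_in, c_out);
    S: elementwise similarity, e.g. s_euclid or s_conv from the sketch above.
    """
    d = w.shape[0]
    patch = x[m:m + d, n:n + d, :]              # receptive field of the output pixel
    return float(np.sum(S(patch, w[..., l])))   # sum of similarities over the patch
\end{verbatim}

Passing \texttt{s\_euclid} or \texttt{s\_adder} from the earlier sketch in place of the multiplicative similarity reproduces the corresponding layer types.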
\n\nInstead of simply replacing the similarity on the summation, there is also the possibility to replace the full expression of equation \\eqref{eq: layer} as, for example, proposed in \\cite{limonova2020resnet,limonova2020bipolar}, by approximating the activation of a given layer with an exponential term. Unfortunately, these methods only lead to speed-up in certain cases and, in particular, they do not improve CPU inference time. Moreover, the reported accuracy on the benchmark problems is also lower than the typical baseline.\n\nIn \\cite{mondal2019dense}, authors used three layer morphological neural networks for image classification. Morphological neural networks were introduced in 1990s in \\cite{davidson1990theory, ritter1996introduction} and use the notion of erosion and dilation to replace equation \\eqref{eq: layer}:\n \\begin{align*}\n \\mbox{Erosion}(x,w) &= \\min_j S(x_j, w_j) = \\min_j (x_j - w_j), \\\\\n \\mbox{Dilation}(x,w) &= \\max_j S(x_j, w_j) = \\max_j (x_j + w_j).\n \\end{align*}\nThe authors proposed two methods by stacking layers to expand networks, but they admitted the possibility of over-fitting and difficult training issues, casting doubt on scalability of the method.\n\n\n\n\\section{Theoretical Justification}\\label{sec:theory}\nThis section provides some theoretical ground for the connections among AdderNets, EuclidNets, and conventional convolution.\n\\subsection{Equivalence with Multiplication}\\label{sec:theory_align}\n\n\nEuclidean distance has a close tie with multiplication and hence, it can replace the multiplications in convolution and linear layers. Here, we delve into the details of this claim a bit more.\n\nLet us consider Euclidean distance between the two vectors ${\\mathbf{x}}$ and ${\\mathbf{w}}$ as $\\norm{{\\mathbf{x}}-{\\mathbf{w}}}=({\\mathbf{x}}-{\\mathbf{w}}){^\\top}({\\mathbf{x}}-{\\mathbf{w}})$ where ${\\mathbf{x}}$ and ${\\mathbf{w}}$ are the vectors of inputs and weights respectively. Moreover, ${\\mathbf{x}}$ and ${\\mathbf{w}}$ are vectors of random variables, so it is of interest to study the expected value of the EuclidNet operation first,\n\\begin{equation}\n -\\frac 1 2 \\mathbb{E} \\norm{{\\mathbf{x}}-{\\mathbf{w}}} = -\\frac 1 2 \\mathbb{E}\\norm {\\mathbf{x}} - \\frac 1 2 \\mathbb{E}\\norm {\\mathbf{w}} + \\mathbb{E}({\\mathbf{x}}{^\\top}{\\mathbf{w}}) .\n \\label{eq:euclid_expect}\n\\end{equation}\nIn other words \\eqref{eq:euclid_expect}, convlution similarity measure, i.e. the inner product ${\\mathbf{x}}{^\\top}{\\mathbf{w}}$, is embedded in EuclidNet form. However, the result is biased with two extra terms i.e. $-\\frac 1 2 \\mathbb{E}\\norm{\\mathbf{x}}$ and $-\\frac 1 2 \\mathbb{E}\\norm{\\mathbf{w}}$. Thus we may conclude that Euclidean distance is aligned with multiplication shifted by two bias terms. The induced bias by the EuclidNet operation remains controlled in both training or inference, most deep learning models use some sort of normalization mechanism such as batch norm, layer norm, and weight norm. \n\nEuclidean distance also has a close relationship with cosine similarity. Let us define $S_{\\mathrm{cos}}$ as \n\n\\begin{equation}\n S_{\\mathrm{cos}}({\\mathbf{x}},{\\mathbf{w}}):= \\frac{{\\mathbf{x}}^\\top {\\mathbf{w}}}{\\rootnorm {\\mathbf{x}} \\rootnorm{\\mathbf{w}}}.\n \\label{eq:cosine_sim}\n\\end{equation}\nIt is easy to see that in the case of having a normalization mechanism (i.e. 
$\\rootnorm {\\mathbf{x}}=\\rootnorm {\\mathbf{w}}=1$) the cosine similarity and Euclid similarity become equivalent\n\\begin{eqnarray}\n S_{\\mathrm{euclid}}({\\mathbf{x}},{\\mathbf{w}})= S_{\\mathrm{cos}}({\\mathbf{x}},{\\mathbf{w}})-1 &\\mathrm{\\quad s.t.}& \\rootnorm {\\mathbf{x}} = \\rootnorm {\\mathbf{w}}=1 .\n \\label{eq:cosine_distance}\n\\end{eqnarray}\n\nMoreover, Euclidean norm is a transitive similarity measure since it satisfies the following inequality\n\n\\begin{equation}\n \\|{\\mathbf{x}}-{\\mathbf{w}}\\|_2 \\geq\\lvert~\\|{\\mathbf{x}}\\|_2-\\|{\\mathbf{w}}\\|_2 ~\\rvert .\n \\label{eq:inv_triangle}\n\\end{equation}\nIt is noteworthy to mention that this transitivity holds for p-norms (i.e. $\\|\\mathbf{a}\\|_p= (\\sum_i \\|{a}_i\\|^p)^{\\frac{1}{p}}$). This means that the AdderNet \\cite{chen2020addernet} operator is also transitive. According to equation \\eqref{eq:cosine_distance}, however, the only norm that has such a close relationship with the cosine similarity is Euclidean norm. This is the distinguishing feature of the EuclidNets that while they are distance based, and hence enjoy the transitivity property in measuring similarity, their performance is also completely aligned with those based on Cosine similarity. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\\subsection{Expressiveness of EuclidNets}\nDeep learning models that use the EuclidNet operation are just as expressive as those using multiplication. Note the polarization identity, \n\\[\nS_{\\mathrm{conv}}(x,w) = S_{\\mathrm{euclid}}(x,w) - S_{\\mathrm{euclid}}(x,0) - S_{\\mathrm{euclid}}(0,w)\n\\]\nwhich means that any multiplication operation can be expressed using only Euclid operations. \n\n\\subsection{Hardware cost}\n\nTraditionally, hardware developers \nuse smaller multipliers to create larger multipliers \\cite{de2009large}. They use various methods of multiplier tiling or divide and conquer to form larger multiplier. Karatsuba algorithm and its generalization \\cite{weimerskirch2006generalizations} is among the most known algorithms to implement large multipliers. Here we show that Euclidean distance can be potentially implemented with fewer multipliers in hardware.\n\nKaratsuba algorithm is a form of divide and conquer algorithm to perform $n-$bit multiplication using $m-$bit multipliers. Let us assume $a$ and $b$ are $n-$bit integer numbers and they can be re-written using two $m-$bits partitions\n\\begin{align}\n\\nonumber\n &a = a_1 \\times 2^m + a_2,\\\\ \\nonumber\n &b = b_1 \\times 2^m + b_2.\\\\ \n \\label{eq:karatsuba_parts}\n\\end{align}\nIn the case of multiplication, we have\n\\begin{align}\n\\nonumber\n &ab = (a_1 \\times 2^m + a_2) (b_1 \\times 2^m + b_2)\\\\ \\nonumber\n &~~~= 2^{2m}a_1b_1+2^m a_1b_2+2^m a_2b_1+a_2b_2,\\\\ \n \\label{eq:mult}\n\\end{align}\nwhich is comprised of \\textit{three} additions and \\textit{four} $m-$bits multiplications. However for the squaring operation, we have\n\\begin{align}\n\\nonumber\n &a^2 = (a_1 \\times 2^m + a_2) (a_1 \\times 2^m + a_2)\\\\ \\nonumber\n &~~~= 2^{2m}a_1^2+2^{m+1} a_1a_2+a_2^2,\\\\\n \\label{eq:square}\n\\end{align}\nwhich is comprised of \\textit{two} additions and \\textit{three} $m-$bits multiplications. Thus, the squaring operation can be cheaper in hardware. Also note that such divide and conquer techniques are used commonly in designing accelerator on FPGA targets.\n\n\n\n\\section{Training EuclidNets}\n\nTraining EuclidNets are much easier compared to other similarity measures such as AdderNets. 
This makes EuclidNets attractive for complex tasks such as image segmentation and object detection, where training compressed networks is challenging and causes large accuracy drops. EuclidNets are more expensive than AdderNets when using a floating-point number format; however, their quantization is easy since, unlike AdderNets, they behave to a great extent like traditional convolution. In other words, EuclidNets are easy to quantize.


While training a deep learning model using EuclidNets, it is convenient to use the identity
\begin{equation}
 S_{{\mathrm{euclid}}}(x,w) = -\frac {x^2}{2} - \frac{w^2}{2} + x w,
\end{equation}
which is better suited to GPUs that are optimized for inner product computations. As such, training EuclidNets does not require an additional CUDA kernel implementation, unlike AdderNets \citep{cuda}. The official implementation of AdderNet \citep{chen2020addernet} is on the order of $20\times$ slower to train than traditional convolution in PyTorch. This is especially problematic for large deep learning models and complex tasks, since even traditional convolution training takes a few days or even weeks. EuclidNet training is about $2\times$ slower in the worst case, and its implementation is natural in deep learning frameworks such as PyTorch and TensorFlow.




\begin{table}[h]\label{tab: times}
	\caption{Time (seconds) and maximum training batch size that can fit in a single GPU \textit{Tesla V100-SXM2-32GB} during ImageNet training. In parentheses is the slowdown with respect to the $S_{conv}$ baseline.
	We do not show times for AdderNet, which is much slower than both, because it is not implemented in CUDA.
	}
	\centering
\begin{tabular}{cc l l cc}
	\multirow{2}{*}{\textbf{Model}} & \multirow{2}{*}{\textbf{Method}} & \multicolumn{2}{l}{\textbf{Maximum batch size}} & \multicolumn{2}{l}{\textbf{Time per step (s)}} \\
	& & \multicolumn{1}{l}{\textbf{power of 2}} & \multicolumn{1}{l}{\textbf{integer}} & \textbf{Training} & \textbf{Testing} \\ \hline
	\multirow{2}{*}{ResNet-18} & $S_{{\mathrm{conv}}}$ & 1024 & 1439 & 0.149 & 0.066 \\
	& $S_{{\mathrm{euclid}}}$ & 512 & 869 ($1.7\times$) & 0.157 ($1.1\times$) & 0.133 ($2\times$) \\ \hline
	\multirow{2}{*}{ResNet-50} & $S_{{\mathrm{conv}}}$ & 256 & 371 & 0.182 & 0.145 \\
	& $S_{{\mathrm{euclid}}}$ & 128 & 248 ($1.5\times$) & 0.274 ($1.5\times$) & 0.160 ($1.1\times$) \\ \hline
\end{tabular}


\end{table}


A common method in training neural networks is fine-tuning, which means initializing with weights trained on different data of a similar nature. Here, we introduce the idea of using a weight initialization from a model trained on a related similarity measure.

Rather than training from scratch, we wish to fine-tune EuclidNet starting from accurate CNN weights. This is achieved by an ``architecture homotopy'' where we gradually change a hyperparameter to convert a regular convolution into an EuclidNet operation,
\begin{equation}
S(x,w; \lambda_k) = xw - \lambda_k\frac{x^2 + w^2}{2},\qquad \mbox{ with }\lambda_k = \lambda_0 + \frac{1 - \lambda_0}{n} \cdot k,
\label{eq: homotopy}
\end{equation}
where $n$ is the total number of epochs and $0<\lambda_0<1$ is the initial transition phase.
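A minimal PyTorch-style sketch of how this homotopy similarity can be dropped into a convolution using only standard operations is given below; the function and argument names are ours for illustration and are not from an official implementation.

\begin{verbatim}
import torch
import torch.nn.functional as F

def homotopy_euclid_conv2d(x, weight, lam, stride=1, padding=0):
    """Convolution with S(x, w; lambda) = x*w - lambda*(x^2 + w^2)/2
    summed over each patch. lam = 0 recovers the standard convolution,
    lam = 1 the EuclidNet layer.

    x: (N, C_in, H, W); weight: (C_out, C_in, d, d).
    """
    out = F.conv2d(x, weight, stride=stride, padding=padding)      # sum of x*w terms
    if lam == 0.0:
        return out
    ones = torch.ones(1, weight.shape[1], weight.shape[2], weight.shape[3],
                      dtype=x.dtype, device=x.device)
    x_sq = F.conv2d(x * x, ones, stride=stride, padding=padding)   # sum of x^2 per patch
    w_sq = (weight * weight).sum(dim=(1, 2, 3)).view(1, -1, 1, 1)  # sum of w^2 per filter
    return out - 0.5 * lam * (x_sq + w_sq)
\end{verbatim}

With this form, the linear schedule for $\lambda_k$ only requires updating the scalar \texttt{lam} once per epoch.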
Note that $S(x,w,0) = S_{{\\mathrm{conv}}}(x,w)$ and $S(x,w,1) =S_{{\\mathrm{euclid}}}(x,w)$ and equation \\eqref{eq: homotopy} is a convex combination of these two similarities. One may interpret $\\lambda_k$ as a scheduler for the homotopy, similar to the way learning rate is scheduled in training a deep learning model. We found that a linear scheduling as shown in equation \\eqref{eq: homotopy} is empirically effective.\n\nTransformations like equation \\eqref{eq: homotopy} are commonly used in scientific computing \\cite{allgower2003introduction}. The idea of using homotopy in training neural networks can be traced back to \\cite{chow1991homotopy}. Recently, homotopy was used in deep learning in the context of activation functions \\citep{pathak2019parameter,cao2017hashnet, mobahi2016training,farhadi2020}, loss functions \\citep{gulcehre2016mollifying}, compression \\citep{chen2019efficient} and transfer learning \\citep{bengio2009curriculum}. Here, we use homotopy in the context of transforming operations of a deep learning model.\n\nFine-tuning method in equation \\eqref{eq: homotopy} is inspired by continuation methods in partial differential equations. Assume $S$ is a solution to a differential equation with the initial condition $S(x,0) = S_0(x)$. In certain situations, solving this differential equation for $S(x,t)$ and then evaluating at $t=1$ might be easier than solving directly for $S_1$. One may think of this homotopy method as an evolution for deep learning model weights. At time zero the deep learning model consists of regular convolutional layers, but they gradually transform to Euclidean layers.\n\nThe homotopy method can also be interpreted as a sort of of knowledge distillation. Whereas knowledge distillation methods tries to match a student network to a teacher network, the homotopy can be seen as a slow transformation from the teacher network into a student network. Figure \\ref{fig: homotopy} demonstrates the idea. Interestingly, problems that have been solved with homotopy have also been tackled by knowledge distillation \\citep{hinton2015distilling,chen2019efficient,yim2017gift, bengio2009curriculum}.\n\n\\begin{figure}[t]\n\t\\begin{center}\n\t\\includegraphics[width=0.7\\linewidth]{homotopy_intuition}\n\t\\end{center}\n\t\\caption{Training schema of EuclidNet using Homotopy, i.e. transitioning from traditional convolution $S(x,w)=xw$ towards EuclidNet $S(x,w)=-\\frac{1}{2} |x-w|^2$ through equation \\eqref{eq: homotopy}.} \\label{fig: homotopy}\n\\end{figure}\n\n\\section{Experiments}\\label{sec:Experiments}\n\nTo illustrate performance of the EuclidNets, We apply our proposed method on image classification tasks. We also test our trained deep learning model under different transformations on the input image and compare the accuracy to standard convolutional networks.\n\n\\subsection{CIFAR10}\\label{sec: cifar10}\n\nFirst, we consider the CIFAR10 dataset, consisting of $32\\times32$ RGB images with 10 possible classifications \\citep{krizhevsky2009learning}. We normalize and augment the dataset with random crop and random horizontal flip. We consider two ResNet models \\cite{he2015deep}, ResNet-20 and ResNet-32.\n\nWe train EuclidNet using the optimizer from \\cite{chen2020addernet}, which we will refer to as AdderSGD, to evaluate EuclidNet under a similar setup. We use initial learning rate $0.1$ with cosine decay, momentum $0.9$, batch size 128 and weight decay $5\\times 10^{-4}$. 
We follow \cite{chen2020addernet} in setting the learning-rate scaling parameter $\eta$. For the traditional convolutional network, we use the same hyper-parameters with the stochastic gradient descent optimizer.

Classification accuracies are reported in Table \ref{tab: cifar10}. We consider two different weight initializations for EuclidNets: first, we initialize the weights randomly; second, we initialize them with weights pre-trained on a convolutional network. EuclidNets show negligible accuracy loss compared to the standard ResNets. We see that for CIFAR10, training from scratch achieves even a slightly higher accuracy, while initializing with a convolutional network and using linear homotopy training improves it further.

\begin{table}[h]
\caption{Top-1 accuracy on CIFAR10 and CIFAR100. The initial learning rate is adjusted for non-random initialization.}
\label{tab: cifar10}
\centering
		\begin{tabular}{ccccccc}
			\multirow{2}{*}{Model} & \multirow{2}{*}{Similarity} & \multirow{2}{*}{Initialization} & \multirow{2}{*}{Homotopy} & \multirow{2}{*}{Epochs} & \multicolumn{2}{c}{Top-1 accuracy} \\
			& & & & & CIFAR10 & CIFAR100 \\ \hline
			\multirow{4}{*}{ResNet-20} & $S_{{\mathrm{conv}}}$ & Random & None & 400 & 92.97 & \textbf{69.29} \\
			& \multirow{3}{*}{$S_{{\mathrm{euclid}}}$} & Random & None & 450 & 93.00 & 68.84 \\
			& & \multirow{2}{*}{Conv} & None & 100 & 90.45 & 64.62 \\
			& & & Linear & 100 & \textbf{93.32} & 68.8 \\ \hline
			\multirow{4}{*}{ResNet-32} & $S_{{\mathrm{conv}}}$ & Random & None & 400 & \textbf{93.93} & 71.07 \\
			& \multirow{3}{*}{$S_{{\mathrm{euclid}}}$} & Random & None & 450 & 93.28 & \textbf{71.22} \\
			& & \multirow{2}{*}{Conv} & None & 150 & 91.28 & 66.58 \\
			& & & Linear & 100 & 92.62 & 68.42 \\ \hline
		\end{tabular}
\end{table}

EuclidNets can become unstable during training, despite a careful choice of the optimizer. Figure \ref{fig: train_comparison} shows a comparison of EuclidNet training with that of a standard convolutional network. As can be seen in Figure \ref{fig: train_comparison}, fine-tuning EuclidNets directly from convolutional network weights is more stable than training from scratch. Also observe that when we train EuclidNets from scratch, the accuracy is lower but the convergence is faster. Finally, using homotopy in the training procedure improves the accuracy. Note that pre-trained convolution weights are commonly available in most neural compression tasks, so initializing EuclidNets with pre-trained convolution weights is a commonplace procedure when optimizing deep learning models for inference.

\begin{figure}
	\centering
	\includegraphics[width=0.7\textwidth]{comp}
	\caption{Evolution of testing accuracy during training of ResNet-20 on CIFAR10, initialized with random weights or from a pre-trained convolutional network. Initializing from a pre-trained convolutional network speeds up the convergence. EuclidNet is harder to train than the convolutional network when both are initialized with random weights.}\label{fig: train_comparison}
\end{figure}

EuclidNets are not only faster to train than other norm-based similarity measures, but also superior in terms of accuracy. AdderNet performs slightly worse in terms of accuracy and is also much slower to train.
The accuracy is significantly lower for the synapse \cite{dogaru1999comparative} and the multiplication-free \cite{akbas2015multiplication} operators. Table \ref{tab: sim_comparison} shows a top-1 accuracy comparison of the different methods. The reported results for AdderNet are from \cite{xu2020kernel}. Note that although the authors of \cite{xu2020kernel} used knowledge distillation to close the gap with full precision for AdderNet, it still falls short of EuclidNet.

\begin{table}
	\caption{Full precision results on ResNet-20 for CIFAR10 for different multiplication-free similarities.}
	\label{tab: sim_comparison}
	\centering
	\begin{tabular}{cccccc}
		\textbf{Similarity} & $S_{{\mathrm{conv}}}$ & $S_{{\mathrm{euclid}}}$ & $S_{{\mathrm{adder}}}$ & $S_{{\mathrm{mfo}}}$ & $S_{{\mathrm{synapse}}}$ \\
		\hline
		\textbf{Accuracy} & 92.97 & \textbf{93.00} & 91.84 & 82.05 & 73.08 \\
	\end{tabular}
\end{table}

Training a quantized $S_{{\mathrm{euclid}}}$ network is very similar to training a quantized convolutional neural network. This allows wider use of such models on lower-resource devices. Quantization of EuclidNets to 8 bits keeps the accuracy drop within the range of one percent \citep{wu2020integer}, similar to traditional convolutional neural networks. Table \ref{tab: quant} shows the 8-bit quantization of EuclidNet, where the accuracy drop remains negligible. Furthermore, training EuclidNets on the CIFAR100 dataset exhibits a negligible accuracy drop when the weights are initialized with pre-trained standard model weights.

\subsection{ImageNet}

Next, we test the EuclidNet classifier on ImageNet \cite{imagenet_cvpr09}, which is a more challenging classification task than CIFAR10. We trained our baseline convolutional neural network with the standard augmentations of random resized crop, horizontal flip and normalization. We consider ResNet-18 and ResNet-50 models with the same hyper-parameters as those used in Section \ref{sec: cifar10}.

Table \ref{tab: in} shows top-1 and top-5 classification accuracy on the ImageNet dataset. As shown in Table \ref{tab: in}, the accuracy of EuclidNet when trained from scratch is lower than the baseline, emphasizing the importance of homotopy training. We believe that the accuracy drop without homotopy is because hyper-parameter tuning is harder for large datasets such as ImageNet. That is, even though there may exist hyper-parameters that achieve equivalent accuracy with random initialization, they are too difficult to find. Thus, it is much easier to use the existing hyper-parameters of the traditional convolutional neural network and use homotopy to smoothly transform the weights into weights that are suitable for EuclidNets.

\begin{table}[h]
	\centering
	\caption{Full precision results on ImageNet.
Best result for each model is in bold.}\label{tab: in}
	\scalebox{0.8}{
	\begin{tabular}{ccccccc}
		Model & Similarity & Initialization & Homotopy & Epochs & \multicolumn{1}{l}{Top-1 Accuracy} & \multicolumn{1}{l}{Top-5 Accuracy} \\ \hline
		\multirow{6}{*}{ResNet-18} & $S_{{\mathrm{conv}}}$ & Random & None & 90 & 69.56 & 89.09 \\ \cline{2-7}
		& \multirow{5}{*}{$S_{{\mathrm{euclid}}}$} & Random & None & 90 & 64.93 & 86.46 \\ \cline{3-7}
		& & \multirow{4}{*}{Conv} & None & 90 & 68.52 & 88.79 \\ \cline{4-7}
		& & & \multirow{3}{*}{Linear} & 10 & 65.36 & 86.71 \\
		& & & & 60 & 69.21 & 89.13 \\
		& & & & 90 & \textbf{69.69} & \textbf{89.38} \\ \hline
		\multirow{6}{*}{ResNet-50} & $S_{{\mathrm{conv}}}$ & Random & None & 90 & 75.49 & 92.51 \\ \cline{2-7}
		& \multirow{5}{*}{$S_{{\mathrm{euclid}}}$} & Random & None & 90 & 37.89 & 63.99 \\ \cline{3-7}
		& & \multirow{4}{*}{Conv} & None & 90 & 75.12 & 92.50 \\ \cline{4-7}
		& & & \multirow{3}{*}{Linear} & 10 & 70.66 & 90.10 \\
		& & & & 60 & 74.93 & 92.52 \\
		& & & & 90 & \textbf{75.64} & \textbf{92.86} \\ \hline
	\end{tabular}
	}
\end{table}

\subsection{Transformation and blurring}

Here we provide empirical evidence that the Euclidean similarity is well aligned with multiplication. First, we show that EuclidNets perform as well as standard convolutional neural networks under a \textit{pixel transformation}. Second, we show that when the image is blurred with a Gaussian filter, EuclidNets closely follow the behaviour of convolutional neural networks.

\subsubsection{Pixel transformation}
We define the pixel transformation of an image as
\begin{equation}
\mathbf{I_T} = a\mathbf{I}+b,
\label{eq:transform}
\end{equation}
where $\mathbf{I}$ is a tensor representing the original image, the scalars $a$ and $b$ are transformation parameters, and $\mathbf{I_T}$ is the transformed image. Note that in \eqref{eq:transform}, $a$ controls the contrast and $b$ controls the brightness of the image. Such transformations are widely used at various stages of imaging systems, for instance in color correction and gain control (ISO).

Figure \ref{fig:transform} shows the accuracy of the standard ResNet-18 and EuclidNet ResNet-18 when the input image is affected by the pixel transformation of equation \eqref{eq:transform}. We can see that when changing $a$ and $b$, EuclidNet ResNet-18 closely follows the accuracy of the standard ResNet-18.

\begin{figure}[t]
 \centering
 \includegraphics[width=0.45\textwidth]{Conv_linear_t.png}
 \includegraphics[width=0.45\textwidth]{Euclid_linear_t.png}
 \caption{Accuracy of CIFAR10 classification affected by the pixel transformation for a standard ResNet-18 (left) and EuclidNet ResNet-18 (right).}
 \label{fig:transform}
\end{figure}
\begin{figure}[t]
 \centering
 \includegraphics[width=0.45\textwidth]{conv_blur_acc.png}
 \includegraphics[width=0.45\textwidth]{euclid_blur_acc.png}
 \caption{Accuracy of CIFAR10 classification affected by Gaussian blurring for a standard ResNet-18 (left) and EuclidNet ResNet-18 (right).}
 \label{fig:noise}
\end{figure}

\subsubsection{Gaussian Blurring}

Additive noise can be injected into an image at different stages of the imaging system due to faulty equipment or environmental conditions. We tested EuclidNet when the input image is affected by Gaussian blurring.
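The sketch below illustrates how such a perturbation can be generated; it is our illustration rather than the original evaluation script, and the kernel sizes and intensities shown are placeholders.
\begin{verbatim}
# Illustrative Gaussian blurring of a batch of images (placeholder values).
import torch
from torchvision.transforms import GaussianBlur

images = torch.rand(8, 3, 32, 32)        # stand-in batch of CIFAR10 images
for kernel_size in (3, 5, 7):
    for sigma in (0.5, 1.0, 2.0):
        blurred = GaussianBlur(kernel_size, sigma=sigma)(images)
        # ... evaluate the standard and EuclidNet ResNet-18 on `blurred` ...
\end{verbatim}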
Figure \ref{fig:noise} compares the standard ResNet-18 and EuclidNet ResNet-18 for different noise intensities~$\sigma$ and kernel sizes. This experiment is performed on CIFAR10 classification. We can see that EuclidNet ResNet-18 closely follows the behaviour of the standard ResNet-18 across the different kernel sizes and noise intensities.

\section{Conclusion}

EuclidNets are a class of deep learning models in which the multiplication operator is replaced with the Euclidean distance. They are designed to be implemented on application-specific hardware, with the idea that subtraction and squaring are cheaper than multiplication when designing efficient hardware for inference. Furthermore, in contrast to other efficient architectures that are difficult to train in low precision, EuclidNets are easily trained in low precision. EuclidNets can be initialized with pre-trained weights of standard convolutional neural networks, and hence training EuclidNets with homotopy can be viewed as fine-tuning convolutional networks for inference. The homotopy method further improves training in such scenarios, and training with this method sometimes surpasses the accuracy of regular convolution.

\section{Introduction}

Suppose that we are given the following partial differential equation~(PDE) for $u(x,t)$:
\begin{equation}
\label{diffusion-wave-PDE}
\D{}{}{}{2 \nu} u = \kappa \frac{\partial^2 u}{\partial x^2}, \quad x \in \mathbb R, \quad t > 0,
\end{equation}
where $\kappa > 0$ and $0 < \nu \le 1$. The `time-fractional derivative operator'~$\D{}{}{}{2 \nu}$ is such that \eqref{diffusion-wave-PDE} reduces to the diffusion equation and the wave equation when $\nu = \frac{1}{2}$ and $\nu = 1$, respectively. The behaviour of a solution of \eqref{diffusion-wave-PDE} is said to be `diffusion-like' (respectively, `wave-like') when $0 < \nu \le \frac{1}{2}$ (respectively, $\frac{1}{2} < \nu \le 1$) and we refer to \eqref{diffusion-wave-PDE} as the time-fractional diffusion equation (respectively, time-fractional wave equation).

The definition of $\D{}{}{}{2 \nu}$ relies on certain concepts from the field of mathematics known as the fractional calculus~\citep{MiRo93,Po99}. For notational convenience, define the function
\begin{equation}
\label{delta-fun}
\delta_\mu(t) =
\begin{cases}
\frac{t^{\mu - 1}}{\Gamma(\mu)} & \text{if $\mu > 0$}, \\
\delta(t) & \text{if $\mu = 0$},
\end{cases}
\end{equation}
where $\Gamma(\mu)$ is the Euler gamma function and $\delta(t)$ is the Dirac delta function.
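For instance, $\delta_1(t) = 1$ and $\delta_{\frac{1}{2}}(t) = 1/\sqrt{\pi t}$, and for every $\mu \ge 0$ the Laplace transform is $\L\{\delta_\mu(t);s\} = s^{-\mu}$, a property that will be used repeatedly in what follows.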
For a suitable function~$y(t)$, the Riemann-Liouville fractional integral of order~$\\mu$ is \n$$\n\\D{}{0}{t}{-\\mu} y(t) = \\frac{1}{\\Gamma(\\mu)} \\int_0^t (t - \\tau)^{\\mu - 1} y(\\tau) \\, \\d \\tau.\n$$\nThis can be expressed as the Laplace convolution\n\\begin{equation}\n\\label{conv-int}\n\\D{}{0}{t}{-\\mu} y(t) = (\\delta_\\mu * y)(t).\n\\end{equation} \nLet $\\ceil{\\mu}$ denote the least integer greater than or equal to $\\mu$, so that $\\ceil{\\mu} \\ge \\mu$. The Caputo fractional derivative of order~$\\mu$ is defined as\n$$\n\\D{C}{0}{t}{\\mu} y(t) = \\D{}{0}{t}{-(\\ceil{\\mu} - \\mu)} D^{\\ceil{\\mu}} y(t),\n$$\nwhereas the Riemann-Liouville fractional derivative of order~$\\mu$ is given by\n$$\n\\D{}{0}{t}{\\mu} y(t) = D^{\\ceil{\\mu}} \\D{}{0}{t}{-(\\ceil{\\mu} - \\mu)} y(t).\n$$\nHere, $\\D{}{0}{t}{-(\\ceil{\\mu} - \\mu)}$ is a Riemann-Liouville fractional integral operator and $D^{\\ceil{\\mu}}$ is an ordinary derivative operator. When $\\mu = m \\in \\mathbb N$, the Riemann-Liouville fractional integral reduces to $m$-fold integration, while the Caputo and Riemann-Liouville fractional derivatives simplify to $m$-fold differentiation. \n\nThis article considers initial-boundary value problems~(IBVPs) and moving boundary problems associated with \\eqref{diffusion-wave-PDE} both when $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$ and $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$, where $0 < \\nu \\le \\frac{1}{2}$. Note that if $n \\in \\mathbb N$, then $D^n u(x,t)$ refers to the $n$th partial derivative of $u(x,t)$ with respect to $t$. \n\nThe Caputo time-fractional diffusion equation (i.e.~$\\D{}{}{}{2 \\nu}= \\D{C}{0}{t}{2 \\nu}$ and $0 < \\nu \\le \\frac{1}{2}$) was used by \\citet{Ni86} to model diffusion in media with fractal geometry. More recently, using a Caputo time-fractional diffusion equation, \\citet{WeChZh15} developed a model to describe how chloride ions penetrate reinforced concrete structures exposed to chloride environments.\n\nMoving boundary problems arise in many areas of science and engineering~\\citep{Cr84,Hi87,Gu03}. Some applications include modelling of biological and tumour invasion~\\citep{CrGu72,ElMcSi20}, drug delivery~\\citep{SaMaHa17} and melting of crystal dendrite~\\citep{MoKiMoMc19}. The classical one-dimensional Stefan problem is a canonical moving boundary problem that models the melting of ice; see some historical notes in \\citet{Vu93}. In this context, the PDE is referred to as the heat equation instead of the diffusion equation. Since Stefan's seminal work, moving boundary problems have been extensively studied. Excellent surveys can be found in the books by \\cite{Cr84,Hi87,Gu03} and the references therein. \n\nAs moving boundary problems are typically nonlinear, they are usually studied using numerical and approximate analytical methods. \\citet{Fu80} performed a comparison of different numerical methods for moving boundary problems; see also \\citet{CaKw04,LeBaLa15} for a study of numerical methods for one-dimensional Stefan problems. Approximate analytical methods for one-dimensional Stefan problems include the heat balance integral method~\\citep{Go58,MiMy08,MiMy12}, the refined integral method~\\citep{SaSiCo06} and the combined integral method~\\citep{MiMy11,Mi12}.\n\nExact analytical solutions of some one-dimensional Stefan problems are reviewed in \\citet{Cr84,Hi87}. However, such solutions of moving boundary problems are quite rare because these problems are highly nonlinear. 
Hence standard methods for linear problems such as separation of variables, Green's functions and integral transforms are usually not applicable. \\citet{RoTh21} used the embedding method to find exact analytical solutions of one-dimensional moving boundary problems for the heat equation. They also showed how the embedding method can be adapted to two-phase Stefan problems. \n\nIn fact, \\citet{RoTh21} considered a general IBVP for the heat equation with time-dependent boundary condition~(BCs) and derived the analytical solution using an embedding technique. The same technique is able to handle both bounded and unbounded spatial domains, unlike the standard solution techniques mentioned above. More recently, \\cite{RoTh22} studied a diffusion-advection-reaction equation and solved the associated IBVP analytically with the embedding method and proposed a numerical method for solving systems of linear Volterra integral equations of the first kind that naturally arise from the technique. The embedding method was introduced in \\citet{Ro14} in the context of pricing American call and put options, and was subsequently adapted to price barrier options~\\citep{GuRoSa20} and perpetual American options with general payoffs~\\citep{Ro22a}. \n\nIn many applications of diffusion-advection-reaction equations to model contaminant or solute transport in porous media, the boundaries are usually assumed to be constant in time. However, solute transport problems can involve various types of time-dependent BCs~\\citep{NgRiSt88,HoGeLe00,GaFuZhMa13}. The application of the embedding method to multilayer diffusion problems with time-dependent BCs is the subject of a recent article~\\citep{Ro22c}.\n\n\\citet{Ro22b} extended the embedding technique to propose a unified way to solve initial value problems~(IVPs) and IBVPs for the time-fractional diffusion-wave equation~\\eqref{diffusion-wave-PDE} (i.e.~$0 < \\nu \\le 1$). The class of IBVPs considered was limited to those with spatial domains where $0 \\le x < \\infty$ and with Dirichlet-type (time-constant) BCs imposed at $x = 0$. The first contribution of the present article is to generalise the results in \\citet{Ro22b} by solving IBVPs for the time-fractional diffusion equation (i.e.~$0 < \\nu \\le \\frac{1}{2}$) with general time-dependent BCs over bounded and unbounded domains, similar to what was done in \\citet{RoTh21} for the classical diffusion equation. The second contribution of the present article is to use the generalisation to find analytical solutions of moving boundary problems for the time-fractional diffusion equation. The reason for the restriction~$0 < \\nu \\le \\frac{1}{2}$, instead of $0 < \\nu \\le 1$, is because we wish to consider `fractional Stefan problems' in this article. Hence we have to restrict to moving boundary problems whose solutions have `diffusion-like behaviour'. \n\nThe formulation of Stefan problems for the heat equation includes an extra condition (known as the Stefan condition) that prescribes the dynamics for the unknown moving boundary. As we will consider the time-fractional diffusion equation here, the Stefan condition will be replaced by an analogous `fractional Stefan condition'. However, it is important to point out that the physical motivation for considering moving boundary problems (in fact, IBVPs in general) for the time-fractional diffusion equation remains an open problem. In this article, we approach the study of such problems from a theoretical viewpoint. \n\nThe outline of this article as follows. 
In Section~2, we revisit a two-parameter auxiliary function introduced in \\citet{Ro22b} by first summarising some of its properties and then deriving new properties that will be especially relevant for moving boundary problems. In Section~3, we formulate a general IBVP for the time-fractional diffusion equation and obtain the solution using the embedding method. Section~4 studies moving boundary problems via two illustrative examples, one with a bounded domain and the other with an unbounded domain. Brief concluding remarks are given in Section~5.\n\n\\section{A useful auxiliary function and its properties}\n\nIn this section, we investigate some properties of an auxiliary function that are useful in the study of the time-fractional diffusion-wave equation. \n\n\n\\subsection{Summary of known properties of the auxiliary function}\n\nLet $\\mu \\ge 0$, $0 < \\nu \\le 1$ and $a > 0$. \\citet{Ro22b} defined the function\n\\begin{equation}\n\\label{R-def}\nR_{\\mu,\\nu}(a,t) = \\L^{-1}\\{s^{-\\mu} \\mathrm e^{-a s^\\nu};t\\}\n\\end{equation}\nas an inverse Laplace transform. Since $\\L\\{\\D{}{0}{t}{-\\mu}f(t);s\\} = s^{-\\mu} \\L\\{f(t);s\\}$, we deduce that \n\\begin{equation}\n\\label{R-basic}\nR_{\\mu,\\nu}(a,t) = {}_{0}^{}D_{t}^{-\\mu} R_{0,\\nu}(a,t)\n\\end{equation} \nand thus $R_{0,\\nu}(a,t)$ can be interpreted as more `basic' than $R_{\\mu,\\nu}(a,t)$. For the convenience of the reader, in this subsection, we summarise some of the properties of $R_{\\mu,\\nu}(a,t)$ that were proved in \\citet{Ro22b}.\n\nThe function~$y(t) = R_{\\mu,\\nu}(a,t)$ verifies $y(0+) = 0$ and satisfies the fractional integral equation\n\\begin{equation}\n\\label{R-int-eq}\na \\nu \\D{}{0}{t}{-(1 - \\nu)} y(t) = t y(t) - \\mu \\int_0^t y(\\tau) \\, \\d \\tau\n\\end{equation}\nand the fractional ordinary differential equation\n\\begin{equation}\n\\label{R-diff-eq}\na \\nu \\D{}{0}{t}{\\nu} y(t) = a \\nu \\D{C}{0}{t}{\\nu} y(t) = t y'(t) + (1 - \\mu) y(t).\n\\end{equation}\nTo evaluate $R_{\\mu,\\nu}(t)$, we can either perform a numerical Laplace transform inversion in \\eqref{R-def} or implement finite difference schemes to solve the integral equation~\\eqref{R-int-eq} or the differential equation~\\eqref{R-diff-eq}. For example, numerical Laplace transform inversion was used to obtain profiles of $R_\\nu(2.5,t)$, as shown in Figure~1 for $\\nu = 0.3, 0.4, 0.5, 0.6, 0.7$.\n\\begin{figure}[ht]\n\\label{R-plot}\n\\centering\n\\includegraphics[scale=0.3]{R.eps}\n\\caption{Plot of $R_\\nu(2.5,t)$ for different values of $\\nu$.}\n\\end{figure}\n\nWhen $\\mu = 0$, $0 < \\nu \\le \\frac{1}{2}$ and $a > 0$, an alternative integral representation of \\eqref{R-def} is\n\\begin{equation}\n\\label{R-alt}\nR_{0,\\nu}(a,t) = \\frac{1}{\\pi} \\int_0^\\infty \\mathrm e^{-t z} \\mathrm e^{-a \\cos(\\pi \\nu) z^\\nu} \\sin(a \\sin(\\pi \\nu) z^\\nu) \\, \\d z.\n\\end{equation}\nAn analogous integral representation when $\\mu > 0$, $0 < \\nu \\le \\frac{1}{2}$ and $a > 0$ can be obtained using \\eqref{R-basic} in \\eqref{R-alt} and taking the Riemann-Liouville fractional integral of the exponential function~$t \\mapsto \\mathrm e^{-t z}$. Note, however, that \\eqref{R-alt} is not necessarily valid when $\\frac{1}{2} < \\nu \\le 1$~\\citep{Ro22b}. 
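For instance, $R_{0,\nu}(a,t)$ can be tabulated directly by applying numerical quadrature to \eqref{R-alt}; the short sketch below is only an illustration (not the code used to produce Figure~1) and can be checked against the special cases in \eqref{R-special-cases} below.
\begin{verbatim}
# Evaluate R_{0,nu}(a,t) from the real-integral representation (R-alt),
# valid for 0 < nu <= 1/2 and a > 0.  Illustration only.
import numpy as np
from scipy.integrate import quad

def R0(a, t, nu):
    c, s = np.cos(np.pi * nu), np.sin(np.pi * nu)
    f = lambda z: np.exp(-t * z) * np.exp(-a * c * z**nu) * np.sin(a * s * z**nu)
    value, _ = quad(f, 0.0, np.inf, limit=200)
    return value / np.pi

# check: R_{0,1/2}(a,t) = a * exp(-a^2/(4t)) / (2 * sqrt(pi * t^3))
a, t = 2.5, 1.0
print(R0(a, t, 0.5), a * np.exp(-a**2 / (4 * t)) / (2 * np.sqrt(np.pi * t**3)))
\end{verbatim}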
\n\nIf $\\mu \\ge 0$, $0 < \\nu \\le 1$ and $a > 0$, then\n\\begin{equation}\n\\label{R-int-prop-1}\nR_{\\mu + \\nu,\\nu}(a,t) = \\int_a^\\infty R_{\\mu,\\nu}(z,t) \\, \\d z.\n\\end{equation}\nIn particular, $\\mu = \\nu$ gives\n\\begin{equation}\n\\label{R-int-prop-2}\nR_{2 \\nu,\\nu}(a,t) = \\int_a^\\infty R_{\\nu,\\nu}(z,t) \\, \\d z.\n\\end{equation}\nSome special cases are\n\\begin{equation}\n\\label{R-special-cases}\nR_{0,\\frac{1}{2}}(a,t) = \\frac{a \\mathrm e^{-\\frac{a^2}{4 t}}}{2 \\sqrt{\\pi t^3}}, \\quad R_{\\frac{1}{2},\\frac{1}{2}}(a,t) = \\frac{\\mathrm e^{-\\frac{a^2}{4 t}}}{\\sqrt{\\pi t}}, \\quad R_{1,\\frac{1}{2}}(a,t) = \\erfc\\Big(\\frac{a}{2 \\sqrt{t}}\\Big),\n\\end{equation}\nwhich follow from \\eqref{R-alt}, \\eqref{R-basic} and \\eqref{R-int-prop-2}, respectively.\n\n\\subsection{Further properties of the auxiliary function}\n\nHere, we derive new properties of the auxiliary function that are needed for solving IBVPs for the time-fractional diffusion equation.\n\nIn the previous subsection, it was pointed out that $R_{\\mu,\\nu}(a,0+) = 0$ for a fixed~$a$. The following result derives a similar property for $R_{\\mu,\\nu}(0+,t)$ with $t$ fixed.\n\\begin{prop}\n\\label{R-a-zero}\nSuppose that $\\mu \\ge 0$, $0 < \\nu \\le 1$ and $a > 0$. Then, for $t > 0$, there holds\n$$\nR_{\\mu,\\nu}(0+,t) = \\lim_{a \\rightarrow 0^+} R_{\\mu,\\nu}(a,t) =\n\\begin{cases}\n\\delta_\\mu(t) & \\text{if $\\mu > 0$}, \\\\\n\\delta(t) & \\text{if $\\mu = 0$},\n\\end{cases}\n$$\nwhere $\\delta_\\mu(t)$ is given in \\eqref{delta-fun} and $\\delta(t)$ is the Dirac delta function.\n\\end{prop}\n\\begin{proof}\nIf $\\mu > 0$, then from \\eqref{R-def} we get\n$$\nR_{\\mu,\\nu}(0+,t) = \\lim_{a \\rightarrow 0^+} R_{\\mu,\\nu}(a,t) = \\L^{-1}\\{s^{-\\mu};t\\} = \\frac{t^{\\mu - 1}}{\\Gamma(\\mu)} = \\delta_\\mu(t).\n$$\nSimilarly, if $\\mu = 0$, then\n$$\nR_{0,\\nu}(0+,t) = \\lim_{a \\rightarrow 0^+} R_{0,\\nu}(a,t) = \\L^{-1}\\{1;t\\} = \\delta(t).\n$$\n\\end{proof}\n\nThe next proposition will be used when taking the spatial derivative of the solution of an associated IBVP. Note the assumption~$\\mu \\ge \\nu$ here.\n\\begin{prop}\n\\label{R-partial-a}\nIf $0 < \\nu \\le 1$, $\\mu \\ge \\nu$ and $a > 0$, then\n\\begin{equation*}\n\\frac{\\partial R_{\\mu,\\nu}}{\\partial a}(a,t) = -R_{\\mu - \\nu,\\nu}(a,t).\n\\end{equation*}\n\\end{prop}\n\\begin{proof}\nIt is straightforward to see from \\eqref{R-def} that\n$$\n\\frac{\\partial R_{\\mu,\\nu}}{\\partial a}(a,t) = \\L^{-1}\\{s^{-\\mu} \\mathrm e^{-a s^\\nu} (-s^\\nu);t\\} = -\\L^{-1}\\{s^{-(\\mu - \\nu)} \\mathrm e^{-a s^\\nu};t\\} = -R_{\\mu - \\nu,\\nu}(a,t).\n$$\n\\end{proof}\n\nThe next task is to obtain a series representation for $R_{\\mu,\\nu}(a,t)$. Recall the Mainardi function~$M(z;\\nu)$ with the series representation~\\citep{Ma96}\n$$\nM(z;\\nu) = \\sum_{j = 0}^\\infty \\frac{(-z)^j}{j! \\Gamma(-\\nu j + (1 - \\nu))},\n$$\nwhere $0 < \\nu < 1$. It turns out to be a special case of the Wright function~$W(z;\\alpha,\\beta)$ with the series representation~\\citep{MaPa03}\n\\begin{equation}\n\\label{W-series}\nW(z;\\alpha,\\beta) = \\sum_{j = 0}^\\infty \\frac{z^j}{j! \\Gamma(\\alpha j + \\beta)}, \n\\end{equation}\nwhere $\\alpha > -1$ and $\\beta > 0$ (in fact, it is also valid for $\\beta \\in \\mathbb C$). More precisely, $M(z;\\nu) = W(-z;-\\nu,1 - \\nu)$. 
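For the moderate arguments required in this article, $W(z;\alpha,\beta)$ can be evaluated simply by truncating the series \eqref{W-series}; the sketch below is an illustration only, where the reciprocal gamma function is used so that poles of $\Gamma$ contribute zero terms.
\begin{verbatim}
# Truncated series for the Wright function W(z; alpha, beta) (illustration).
from scipy.special import rgamma, erfc

def wright(z, alpha, beta, terms=80):
    total, z_pow, factorial = 0.0, 1.0, 1.0
    for j in range(terms):
        total += z_pow / factorial * rgamma(alpha * j + beta)
        z_pow *= z
        factorial *= j + 1
    return total

# sanity check: W(-z; -1/2, 1) = erfc(z/2)
print(wright(-1.0, -0.5, 1.0), erfc(0.5))
\end{verbatim}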
An interesting relation pointed out in \\citet{Ro22b}, valid when $0 < \\nu \\le \\frac{1}{2}$, is\n\\begin{equation}\n\\label{MWR-rel}\nM(a t^{-\\nu};\\nu) = W(-a t^{-\\nu};-\\nu,1 - \\nu) = t^\\nu R_{1 - \\nu,\\nu}(a,t) = t^\\nu \\D{}{0}{t}{-(1 - \\nu)} R_{0,\\nu}(a,t).\n\\end{equation}\nWe will use \\eqref{MWR-rel} to derive a series representation for $R_{\\mu,\\nu}(a,t)$ when $\\mu \\ge 0$, $0 < \\nu \\le \\frac{1}{2}$ and $a > 0$.\n\n\\begin{prop}\n\\label{R-series}\nLet $\\mu \\ge 0$, $0 < \\nu \\le \\frac{1}{2}$ and $a > 0$. A series representation for $R_{\\mu,\\nu}(a,t)$ is given by\n$$\nR_{\\mu,\\nu}(a,t) = t^{\\mu - 1} W(- a t^{-\\nu};-\\nu,\\mu) = \\sum_{j = 0}^\\infty \\frac{(- a t^{-\\nu})^j}{j! \\Gamma(-\\nu j + \\mu)}.\n$$\n\\end{prop}\n\\begin{proof}\nThe series representation~\\eqref{W-series} yields\n$$\nW(-a t^{-\\nu};-\\nu,1 - \\nu) = \\sum_{j = 0}^\\infty \\frac{(-a t^{-\\nu})^j}{j! \\Gamma(-\\nu j - \\nu + 1)},\n$$\nwhich in turn gives\n$$\nt^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu) = \\sum_{j = 0}^\\infty \\frac{(-a)^j t^{-\\nu j - \\nu}}{j! \\Gamma(-\\nu j - \\nu + 1)}.\n$$\nSince \n$$\nR_{0,\\nu}(a,t) = \\D{}{0}{t}{(1 - \\nu)} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu))\n$$\nfrom \\eqref{MWR-rel}, we obtain\n\\begin{align*}\n\\D{}{0}{t}{(1 - \\nu)} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu)) & = \\D{}{0}{t}{\\ceil{1 - \\nu}} \\D{}{0}{t}{-(\\ceil{1 - \\nu} - (1 - \\nu))} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu)) \\\\\n& = D^1 \\D{}{0}{t}{-\\nu} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu))\n\\end{align*}\nand\n\\begin{align*}\n\\D{}{0}{t}{-\\nu} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu)) & = \\sum_{j = 0}^\\infty \\frac{(-a)^j}{j! \\Gamma(-\\nu j - \\nu + 1)} \\D{}{0}{t}{-\\nu}(t^{-\\nu j - \\nu}) = \\sum_{j = 0}^\\infty \\frac{(-a)^j t^{-\\nu j}}{j! \\Gamma(1 - \\nu j)}.\n\\end{align*}\nHence\n$$\nR_{0,\\nu}(a,t) = \\D{}{0}{t}{(1 - \\nu)} (t^{-\\nu} W(-a t^{-\\nu};-\\nu,1 - \\nu)) = \\sum_{j = 0}^\\infty \\frac{(-a)^j t^{-\\nu j - 1}}{j! \\Gamma(-\\nu j)}.\n$$\nEq.~\\eqref{R-basic} implies that\n\\begin{align*}\nR_{\\mu,\\nu}(a,t) & = \\D{}{0}{t}{-\\mu} R_{0,\\nu}(a,t) = \\sum_{j = 0}^\\infty \\frac{(-a)^j}{j! \\Gamma(-\\nu j)} \\D{}{0}{t}{-\\mu} (t^{-\\nu j - 1}) \\\\\n& = t^{\\mu - 1} \\sum_{j = 0}^\\infty \\frac{(- a t^{-\\nu})^j}{j! \\Gamma(-\\nu j + \\mu)} = t^{\\mu - 1} W(- a t^{-\\nu};-\\nu,\\mu).\n\\end{align*}\n\\end{proof}\n\n\\begin{rem}\nThe result of Proposition~\\ref{R-series} relies on the relation~\\eqref{MWR-rel}, which is true if $0 < \\nu \\le \\frac{1}{2}$. It is an open problem to determine whether the series representation is also valid for $\\frac{1}{2} < \\nu \\le 1$.\n\\end{rem}\n\n\\begin{rem}\nAside from the auxiliary function~$M(z;\\nu)$, \\citet{MaPa03} also introduced the auxiliary function\n$$\nF(z;\\nu) = \\sum_{j = 0}^\\infty \\frac{(-z)^j}{j! \\Gamma(-\\nu j)}.\n$$\nIt follows from Proposition~\\ref{R-series} that $M(z;\\nu)$ and $F(z;\\nu)$ can be expressed in terms of $R_{\\mu,\\nu}(a,t)$ as\n$$\nM(a t^{-\\nu};\\nu) = t^\\nu R_{1 - \\nu,\\nu}(a,t) = t^\\nu \\D{}{0}{t}{-(1 - \\nu)} R_{0,\\nu}(a,t), \\quad F(a t^{-\\nu};\\nu) = t R_{0,\\nu}(a,t),\n$$\nrespectively. 
Thus we deduce another relation between $M(z;\\nu)$ and $F(z;\\nu)$, namely\n$$\nM(a t^{-\\nu};\\nu) = t^\\nu \\D{}{0}{t}{-(1 - \\nu)}(t^{-1} F(a t^{-\\nu};\\nu)).\n$$\n\\end{rem}\n\n\\begin{ex}\nSome special values of the Wright function are known~\\citep{MaPa03}:\n\\begin{equation}\n\\label{W-special}\nW\\Big(-z;-\\frac{1}{2},\\frac{1}{2}\\Big) = \\frac{\\mathrm e^{-\\frac{z^2}{4}}}{\\sqrt{\\pi}}, \\quad W\\Big(-z;-\\frac{1}{2},1\\Big) = 1 - \\erf\\Big(\\frac{z}{2}\\Big) = \\erfc\\Big(\\frac{z}{2}\\Big).\n\\end{equation}\nUsing Proposition~\\ref{R-series}, it is not difficult to see that the second and third relations in \\eqref{R-special-cases} are recovered.\n\\end{ex}\n\n\\begin{prop}\n\\label{R-integral}\nIf $\\mu \\ge 0$ and $0 < \\nu \\le 1$, then\n$$\n\\int_{-\\infty}^\\infty \\frac{1}{2} R_{\\mu,\\nu}(\\vert z \\vert,t) \\, \\d z = \\delta_{\\mu + \\nu}(t),\n$$\nwhere $\\delta_{\\mu + \\nu}(t)$ is given by \\eqref{delta-fun}.\n\\end{prop}\n\\begin{proof}\nThe definition in \\eqref{R-def} leads to\n\\begin{align*}\n\\int_{-\\infty}^\\infty R_{\\mu,\\nu}(\\vert z \\vert,t) \\, \\d z & = \\int_{-\\infty}^\\infty \\L^{-1}\\{s^{-\\mu} \\mathrm e^{-\\vert z \\vert s^\\nu};t\\} \\, \\d z = \\L^{-1}\\Big\\{\\int_{-\\infty}^\\infty s^{-\\mu} \\mathrm e^{-\\vert z \\vert s^\\nu} \\, \\d z;t\\Big\\} \\\\\n& = 2 \\L^{-1}\\{s^{-(\\mu + \\nu)};t\\} = \\frac{2 t^{\\mu + \\nu - 1}}{\\Gamma(\\mu + \\nu)} = 2 \\delta_{\\mu + \\nu}(t).\n\\end{align*}\nNote that $\\int_{-\\infty}^\\infty \\frac{1}{2} R_{\\mu,\\nu}(\\vert z \\vert,t) \\, \\d z = 1$ only if $\\mu + \\nu = 1$. This observation is related to the generation of probability distributions from the time-fractional diffusion equation discussed in \\cite{Ro22b}.\n\\end{proof}\n\n\\section{Solution of a general IBVP for the time-fractional diffusion equation using the embedding approach}\n\nIn this section, we formulate a general IBVP for the time-fractional diffusion equation (i.e.~$0 < \\nu \\le \\frac{1}{2}$) defined on bounded or unbounded spatial domains, and derive the analytical solution using the embedding approach.\n\nLet $f(x)$, $g^\\pm(t)$ and $\\eta^\\pm(t)$ be given suitable functions. Suppose that $-\\infty \\le \\eta^-(t) < \\eta^+(t) \\le \\infty$ for $t > 0$, which ensures that both bounded and unbounded spatial domains are taken into account. Let $a$, $b$, $c$ and $d$ be constants such that $\\vert a \\vert + \\vert b \\vert > 0$ and $\\vert c \\vert + \\vert d \\vert > 0$.\n\nConsider the IBVP\n\\begin{equation}\n\\label{gen-IBVP}\n\\left\\{\n\\begin{split}\n& \\D{}{}{}{2 \\nu} u = \\kappa \\frac{\\partial^2 u}{\\partial x^2}, \\quad \\eta^-(t) < x < \\eta^+(t), \\quad t > 0, \\\\\n& \\Phi u(x,0+) = f(x), \\quad \\eta^-(0) \\le x \\le \\eta^+(0), \\\\\n& a u(\\eta^-(t),t) + b \\frac{\\partial u}{\\partial x}(\\eta^-(t),t) = g^-(t), \\quad t > 0, \\\\\n& c u(\\eta^+(t),t) + d \\frac{\\partial u}{\\partial x}(\\eta^+(t),t) = g^+(t), \\quad t > 0,\n\\end{split}\n\\right.\n\\end{equation}\nwhere $\\D{}{}{}{2 \\nu}$ is either a Caputo fractional derivative ($\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$) or a Riemann-Liouville fractional derivative ($\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$). 
The operator~$\\Phi$ defines the initial condition~(IC) through\n$$\n\\Phi u =\n\\begin{cases}\nu & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n\\D{}{0}{t}{-(1 - 2 \\nu)} & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n$$\nThe motivation behind the choice of the IC was given in \\citet{Ro22b} as a natural consequence of the Laplace transform properties of the Caputo and Riemann-Liouville fractional derivatives. We assume that the IBVP~\\eqref{gen-IBVP} is well posed.\n\n\\begin{rem}\nIn the special case when $\\nu = \\frac{1}{2}$, the time-fractional diffusion equation reduces to the classical diffusion equation, and the analytical solution of \\eqref{gen-IBVP} was obtained in \\citet{RoTh21} via the embedding method. The numerical solution of a generalisation of \\eqref{gen-IBVP} with advection and reaction terms was addressed in \\citet{RoTh22}.\n\\end{rem}\n\n\\begin{rem}\nThe embedding method was used in \\citet{Ro22b} to provide a unified way to solve IVPs and IBVPs. However, the IBVP studied there is a very special case of \\eqref{gen-IBVP}, i.e.~$\\eta^-(t) = 0$, $\\eta^+(t) = \\infty$ and only a Dirichlet-type BC of the form~$u(x,0+) = h(t)$ for a given function~$h(t)$ was considered at the left endpoint.\n\\end{rem}\n\nLet $f_\\mathrm{ext}(x)$ be an extension of $f(x)$ such that $f_\\mathrm{ext}(x) \\vert_{\\eta^-(0) \\le x \\le \\eta^+(0)} = f(x)$. Denote by $\\chi_A(x)$ the indicator function of the set~$A$, i.e.~$\\chi_A(x) = 1$ if $x \\in A$ and $\\chi_A(x) = 0$ if $x \\notin A$. We can embed the PDE and IC in \\eqref{gen-IBVP} into the IVP on the real line for $v(x,t)$, namely\n\\begin{equation}\n\\label{v-IVP}\n\\begin{split}\n& {}_{}^{}D_{}^{2 \\nu} v = \\kappa \\frac{\\partial^2 v}{\\partial x^2} + F(x,t), \\quad x \\in \\mathbb R, \\quad t > 0, \\\\\n& v(x,0) = f_\\mathrm{ext}(x), \\quad x \\in \\mathbb R, \n\\end{split}\n\\end{equation}\nwhere\n$$\nF(x,t) = \\varphi^-(t) \\chi_{(-\\infty,\\eta^-(t)]}(x) + \\varphi^+(t) \\chi_{[\\eta^+(t),\\infty)}(x) = \n\\begin{cases}\n\\varphi^-(t) & \\text{if $x \\le \\eta^-(t)$}, \\\\\n0 & \\text{if $\\eta^-(t) < x < \\eta^+(t)$}, \\\\\n\\varphi^+(t) & \\text{if $x \\ge \\eta^+(t)$}.\n\\end{cases}\n$$\nThe arbitrary functions~$\\varphi^\\pm(t)$ are to be determined such that the BCs in \\eqref{gen-IBVP} are satisfied when we restrict $\\eta^-(t) \\le x \\le \\eta^+(t)$.\n\n\\begin{rem}\nBefore we proceed to give the solution of \\eqref{v-IVP}, we make a few observations. We can write\n\\begin{align*}\n& \\int_0^t \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) F(\\xi,\\tau) \\, \\d \\xi \\, \\d \\tau \\\\\n& \\qquad = \\int_0^t \\varphi^-(\\tau) \\int_{-\\infty}^{\\eta^-(\\tau)} \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi \\, \\d \\tau \\\\\n& \\qquad \\quad {} + \\int_0^t \\varphi^+(\\tau) \\int_{\\eta^+(\\tau)}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi \\, \\d \\tau.\n\\end{align*}\nSuppose that $\\eta^-(t) \\le x \\le \\eta^+(t)$. The argument when $x = \\eta^\\pm(t)$ can be justified with Proposition~\\ref{R-a-zero}. 
In the first integral on the right-hand side, noting that $-\\infty < \\xi \\le \\eta^-(\\tau) \\le x$, we have from \\eqref{R-int-prop-2} that\n\\begin{align*}\n\\int_{-\\infty}^{\\eta^-(\\tau)} \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi \\, \\d \\tau & = \\int_{-\\infty}^{\\eta^-(\\tau)} \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi \\\\\n& = \\int_{\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}}}^\\infty \\frac{1}{2} R_{\\nu,\\nu}(z,t - \\tau) \\, \\d z \\\\\n& = \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big).\n\\end{align*}\nSimilarly, $x \\le \\eta^+(\\tau) \\le \\xi < \\infty$ in the second integral, giving\n\\begin{align*}\n\\int_{\\eta^+(\\tau)}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi & = \\int_{\\eta^+(\\tau)}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\, \\d \\xi \\\\\n& = \\int_{\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}}}^\\infty \\frac{1}{2} R_{\\nu,\\nu}(z,t - \\tau) \\, \\d z \\\\\n& = \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big).\n\\end{align*}\nTherefore \n\\begin{equation}\n\\label{F-integral}\n\\begin{split}\n\\int_0^t \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) F(\\xi,\\tau) \\, \\d \\xi \\, \\d \\tau & = \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{split}\n\\end{equation}\n\\end{rem}\n\nWe will separate the analysis of \\eqref{v-IVP} according to the type of fractional derivative operator~$\\D{}{}{}{2 \\nu}$ being considered.\n\n\\subsection{Caputo time-fractional diffusion equation}\n\nSuppose that $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$. 
It was shown in \\citet{Ro22b} that the solution of the IVP~\\eqref{v-IVP} is\n\\begin{equation*}\n\\begin{split}\nv(x,t) & = \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} + \\int_0^t \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu} \\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t - \\tau\\Big) F(\\xi,\\tau) \\, \\d \\xi \\, \\d \\tau.\n\\end{split}\n\\end{equation*}\nHence, restricting $\\eta^-(t) \\le x \\le \\eta^+(t)$ and recalling \\eqref{F-integral}, the function\n\\begin{equation}\n\\label{u-sol-1}\n\\begin{split}\nu(x,t) & = \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau \n\\end{split}\n\\end{equation}\nsatisfies the PDE and IC of \\eqref{gen-IBVP}, but not necessarily the BCs.\n\nTo verify the BCs, we need to take the partial derivative of \\eqref{u-sol-1} with respect to $x$. Breaking up the first integral on the right-hand side,\n\\begin{align*}\nu(x,t) & = \\int_{-\\infty}^{x} \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi - \\int_\\infty^x \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{align*}\nPerforming straightforward calculations with the help of Proposition~\\ref{R-partial-a}, we obtain\n\\begin{align*}\n\\frac{\\partial}{\\partial x}\\int_{-\\infty}^{x} \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi & = \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}(0+,t) f_\\mathrm{ext}(x) \\\\\n& \\quad {} - \\int_{-\\infty}^{x} \\frac{1}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi,\n\\end{align*}\n\\begin{align*}\n-\\frac{\\partial}{\\partial x}\\int_\\infty^x \\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \n& = -\\frac{1}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}(0+,t) f_\\mathrm{ext}(x) \\\\\n& \\quad {} + \\int_x^{\\infty} \\frac{1}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi,\n\\end{align*}\n\\begin{align*}\n\\frac{\\partial}{\\partial x}\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau & = - \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau\n\\end{align*}\nand\n\\begin{align*}\n\\frac{\\partial}{\\partial x}\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - 
x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau & = \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{align*}\nCombining these integrals, we get\n\\begin{equation}\n\\label{u-sol-1-der}\n\\begin{split}\n\\frac{\\partial u}{\\partial x}(x,t) \n& = - \\int_{-\\infty}^{x} \\frac{1}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi + \\int_x^{\\infty} \\frac{1}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\ \n& \\quad {} - \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{split}\n\\end{equation}\n\nWe introduce some simplifying notation. Identify $\\eta^-_1$ with $\\eta^-(t)$, $\\eta^-_2$ with $\\eta^-(\\tau)$, $\\eta^+_1$ with $\\eta^+(t)$ and $\\eta^+_2$ with $\\eta^+(\\tau)$. Define the kernel functions\n\\begin{align*}\nK_{11}(\\eta^-_1,\\eta^-_2,\\eta^+_1,\\eta^+_2,t) & = \\frac{a}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^-_1 - \\eta^-_2}{\\sqrt{\\kappa}},t\\Big) - \\frac{b}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^-_1 - \\eta^-_2}{\\sqrt{\\kappa}},t\\Big), \\\\\nK_{12}(\\eta^-_1,\\eta^-_2,\\eta^+_1,\\eta^+_2,t) & = \\frac{a}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+_2 - \\eta^-_1}{\\sqrt{\\kappa}},t\\Big) + \\frac{b}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+_2 - \\eta^-_1}{\\sqrt{\\kappa}},t\\Big), \\\\\nK_{21}(\\eta^-_1,\\eta^-_2,\\eta^+_1,\\eta^+_2,t) & = \\frac{c}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+_1 - \\eta^-_2}{\\sqrt{\\kappa}},t\\Big) - \\frac{d}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+_1 - \\eta^-_2}{\\sqrt{\\kappa}},t\\Big), \\\\\nK_{22}(\\eta^-_1,\\eta^-_2,\\eta^+_1,\\eta^+_2,t) & = \\frac{c}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+_2 - \\eta^+_1}{\\sqrt{\\kappa}},t\\Big) + \\frac{d}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+_2 - \\eta^+_1}{\\sqrt{\\kappa}},t\\Big).\n\\end{align*}\nMoreover, define\n\\begin{equation}\n\\label{h-minus-1}\n\\begin{split}\nh^-(t) & = g^-(t) - \\int_{-\\infty}^\\infty \\frac{a}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\vert \\eta^-(t) - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} +\\int_{-\\infty}^{\\eta^-(t)} \\frac{b}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\eta^-(t) - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} - \\int_{\\eta^-(t)}^{\\infty} \\frac{b}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\xi - \\eta^-(t)}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi\n\\end{split}\n\\end{equation}\nand\n\\begin{equation}\n\\label{h-plus-1}\n\\begin{split}\nh^+(t) & = g^+(t) - \\int_{-\\infty}^\\infty \\frac{c}{2 \\sqrt{\\kappa}} R_{1 - \\nu,\\nu}\\Big(\\frac{\\vert \\eta^+(t) - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} + \\int_{-\\infty}^{\\eta^+(t)} \\frac{d}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\eta^+(t) - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} - \\int_{\\eta^+(t)}^{\\infty} \\frac{d}{2 \\kappa} R_{1 - 2 \\nu,\\nu}\\Big(\\frac{\\xi - \\eta^+(t)}{\\sqrt{\\kappa}},t\\Big) 
f_\\mathrm{ext}(\\xi) \\, \\d \\xi.\n\\end{split}\n\\end{equation}\nSubstituting the above expressions into the BCs in \\eqref{gen-IBVP}, the left BC becomes\n\\begin{equation}\n\\label{left-BC}\n\\begin{split}\n& \\int_0^t K_{11}(\\eta^-(t),\\eta^-(\\tau),\\eta^+(t),\\eta^+(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t K_{12}(\\eta^-(t),\\eta^-(\\tau),\\eta^+(t),\\eta^+(\\tau),t - \\tau) \\varphi^+(\\tau) \\, \\d \\tau = h^-(t),\n\\end{split}\n\\end{equation}\nwhile the right BC simplifies to\n\\begin{equation}\n\\label{right-BC}\n\\begin{split}\n& \\int_0^t K_{21}(\\eta^-(t),\\eta^-(\\tau),\\eta^+(t),\\eta^+(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t K_{22}(\\eta^-(t),\\eta^-(\\tau),\\eta^+(t),\\eta^+(\\tau),t - \\tau) \\varphi^+(\\tau) \\, \\d \\tau = h^+(t).\n\\end{split}\n\\end{equation}\n\nIn summary, the analytical solution of the IBVP~\\eqref{gen-IBVP} for the Caputo time-fractional diffusion equation is \\eqref{u-sol-1}, where $\\varphi^\\pm(t)$ satisfy the pair of linear Volterra integral equations of the first kind described by \\eqref{left-BC} and \\eqref{right-BC}. The functions~$h^\\pm(t)$ are given in \\eqref{h-minus-1} and \\eqref{h-plus-1}. Note that other choices of defining $f_\\mathrm{ext}(x)$ will result in a corresponding adjustment of $h^\\pm(t)$, yielding the same solution in the end.\n\n\\subsection{Riemann-Liouville time-fractional diffusion equation}\n\nNow take $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$. As the calculations are similar to the Caputo case, we just give the final result. The analytical solution of the IBVP~\\eqref{gen-IBVP} for the Riemann-Liouville time-fractional diffusion equation is\n\\begin{equation}\n\\label{u-sol-2}\n\\begin{split}\nu(x,t) & = \\int_{-\\infty}^\\infty \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\vert x - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{split}\n\\end{equation}\nNote that one difference between \\eqref{u-sol-2} and \\eqref{u-sol-1} is in the first integral on the right-hand side. 
The functions~$\\varphi^\\pm(t)$ satisfy the pair of linear Volterra integral equations of the first kind also described by \\eqref{left-BC} and \\eqref{right-BC} but $h^\\pm(t)$ are given by\n\\begin{equation}\n\\label{h-minus-2}\n\\begin{split}\nh^-(t) & = g^-(t) - \\int_{-\\infty}^\\infty \\frac{a}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\vert \\eta^-(t) - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} + \\int_{-\\infty}^{\\eta^-(t)} \\frac{b}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{\\eta^-(t) - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi - \\int_{\\eta^-(t)}^{\\infty} \\frac{b}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{\\xi - \\eta^-(t)}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi\n\\end{split}\n\\end{equation}\nand\n\\begin{equation}\n\\label{h-plus-2}\n\\begin{split}\nh^+(t) & = g^+(t) - \\int_{-\\infty}^\\infty \\frac{c}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\vert \\eta^+(t) - \\xi \\vert}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\\n& \\quad {} + \\int_{-\\infty}^{\\eta^+(t)} \\frac{d}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{\\eta^+(t) - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi - \\int_{\\eta^+(t)}^{\\infty} \\frac{d}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{\\xi - \\eta^+(t)}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi.\n\\end{split}\n\\end{equation}\n\nFor later use, we note that\n\\begin{equation}\n\\label{u-sol-2-der}\n\\begin{split}\n\\frac{\\partial u}{\\partial x}(x,t) & = - \\int_{-\\infty}^{x} \\frac{1}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{x - \\xi}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi + \\int_x^{\\infty} \\frac{1}{2 \\kappa} R_{0,\\nu}\\Big(\\frac{\\xi - x}{\\sqrt{\\kappa}},t\\Big) f_\\mathrm{ext}(\\xi) \\, \\d \\xi \\\\ \n& \\quad {} - \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{x - \\eta^-(\\tau)}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^-(\\tau) \\, \\d \\tau \\\\\n& \\quad {} + \\int_0^t \\frac{1}{2 \\sqrt{\\kappa}} R_{\\nu,\\nu}\\Big(\\frac{\\eta^+(\\tau) - x}{\\sqrt{\\kappa}},t - \\tau\\Big) \\varphi^+(\\tau) \\, \\d \\tau.\n\\end{split}\n\\end{equation}\n\n\\begin{rem}\nAs to be expected, when $\\nu = \\frac{1}{2}$, the Caputo solution~\\eqref{u-sol-1} and the Riemann-Liouville solution~\\eqref{u-sol-2} become identical and recover the analytical solution for the corresponding IBVP for the classical diffusion equation obtained in \\citet{RoTh21}. \n\\end{rem}\n\n\\section{Solutions of moving boundary problems associated with the time-fractional diffusion equation}\n\nWe are now ready to find analytical solutions of moving boundary problems for the time-fractional diffusion equation. Two representative examples will be considered with bounded and unbounded spatial domains. More general moving boundary problems can be handled in a similar fashion.\n\n\\begin{ex}\nConsider the moving boundary problem\n\\begin{equation}\n\\left\\{\n\\label{free-prob-1}\n\\begin{split}\n& \\D{}{}{}{2 \\nu} u = \\frac{\\partial^2 u}{\\partial x^2}, \\quad 0 < x < \\eta(t), \\quad t > 0, \\\\\n& u(x,0) = u_0 \\chi_{(0,\\infty)}(x), \\quad 0 \\le x < \\infty, \\\\\n& u(0,t) = 1, \\quad u(\\eta(t),t) = 0, \\quad t > 0, \\\\\n& \\D{}{}{}{2 \\nu} \\eta(t) = -\\frac{1}{r} \\frac{\\partial u}{\\partial x}(\\eta(t),t), \\quad t > 0.\n\\end{split}\n\\right.\n\\end{equation}\nHere, $r$ and $u_0$ are positive constants and $\\eta(t)$ is the moving boundary. The goal is to find $u(x,t)$ and $\\eta(t)$. 
\n\nWhen $\\nu = \\frac{1}{2}$, \\eqref{free-prob-1} reduces to a classical Stefan problem for the melting of ice over a one-dimensional semi-infinite spatial domain~\\citep{Cr84,Hi87}. In this context, the PDE under consideration is the heat equation. The interval~$[0,\\eta(t)]$ is the region occupied by water. The last equation in \\eqref{free-prob-1} is also known as the Stefan condition and $r$ is the ratio of latent to specific sensible heat. However, when $0 < \\nu < \\frac{1}{2}$, the physical interpretation of the problem in the context of melting of ice is not necessarily valid and we therefore study the IBVP~\\eqref{free-prob-1} strictly from a theoretical perspective. \n\nComparing \\eqref{free-prob-1} with \\eqref{gen-IBVP}, we identify $\\eta^-(t) = 0$, $\\eta^+(t) = \\eta(t)$, $\\kappa = 1$, $a = 1$, $b = 0$, $c = 1$, $d = 0$, $g^-(t) = 1$, $g^+(t) = 0$ and $f(x) = u_0 \\chi_{(0,\\infty)}(x)$. The last equation in \\eqref{free-prob-1} provides a condition (`fractional Stefan condition') for the moving boundary~$\\eta(t)$. Take $f_\\mathrm{ext}(x) = u_0 \\chi_{(-\\infty,0) \\cup (0,\\infty)}(x)$ for all $x \\in \\mathbb R$ for instance.\n\nUsing Proposition~\\ref{R-integral}, we deduce that\n\\begin{equation*}\n\\int_{-\\infty}^\\infty \\frac{1}{2} R_{\\mu,\\nu}(\\vert x - \\xi\\vert,t) \\, \\d \\xi = \\int_{-\\infty}^\\infty \\frac{1}{2} R_{\\mu,\\nu}(\\vert z \\vert,t) \\, \\d z = \\delta_{\\mu + \\nu}(t)\n\\end{equation*}\nfor any $x \\in \\mathbb R$. In particular,\n\\begin{equation}\n\\label{R-int-real}\n\\int_{-\\infty}^\\infty \\frac{1}{2} R_{1 - \\nu,\\nu}(\\vert x - \\xi \\vert,t) \\, \\d \\xi = 1, \\quad \\int_{-\\infty}^\\infty \\frac{1}{2} R_{\\nu,\\nu}(\\vert x - \\xi \\vert,t) \\, \\d \\xi = \\delta_{2 \\nu}(t).\n\\end{equation}\nAssuming that $\\varphi^+(t) = 0$ so as to be able to do some explicit calculations, \\eqref{u-sol-1} and \\eqref{u-sol-2} respectively give\n\\begin{equation*}\nu(x,t) =\n\\begin{cases}\nu_0 + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(x,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\nu_0 \\delta_{2 \\nu}(t) + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(x,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation*}\nEqs.~\\eqref{h-minus-1}, \\eqref{h-minus-2}, \\eqref{h-plus-1} and \\eqref{h-plus-2} yield\n\\begin{equation*}\nh^-(t) = \n\\begin{cases}\n1 - u_0 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n1 - u_0 \\delta_{2 \\nu}(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$},\n\\end{cases} \\quad \nh^+(t) = \n\\begin{cases}\n-u_0 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n-u_0 \\delta_{2 \\nu}(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation*}\n\nNext, let us look at the left BC. Suppose that $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$. Eq.~\\eqref{left-BC} gives\n$$\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(0+,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = 1 - u_0 \\quad \\text{or} \\quad \\D{}{0}{t}{-2 \\nu}\\varphi^-(t) = 2 (1 - u_0)\n$$\nusing Proposition~\\ref{R-a-zero} and \\eqref{conv-int}. If $\\Phi^-(s) = \\L\\{\\varphi^-(t);s\\}$, then \n$$\n\\Phi^-(s) = \\frac{2 (1 - u_0)}{s^{1 - 2 \\nu}}.\n$$ \nTherefore\n$$\n\\varphi^-(t) = 2 (1 - u_0) \\delta_{1 - 2 \\nu}(t)\n$$\nfor the Caputo case. Now suppose that $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$. 
This time \\eqref{left-BC} gives\n$$\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(0+,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = 1 - u_0 \\delta_{2 \\nu}(t) \\quad \\text{or} \\quad \\D{}{0}{t}{-2 \\nu}\\varphi^-(t) = 2 [1 - u_0 \\delta_{2 \\nu}(t)].\n$$\nThen\n$$\n\\Phi^-(s) = \\frac{2}{s^{1 - 2 \\nu}} - 2 u_0,\n$$\nwhich yields\n$$\n\\varphi^-(t) = 2 \\delta_{1 - 2 \\nu}(t) - 2 u_0 \\delta(t)\n$$\nfor the Riemann-Liouville case. Summarising, from the left BC~\\eqref{left-BC} we deduce that\n\\begin{equation*}\n\\varphi^-(t) = \n\\begin{cases}\n2 (1 - u_0) \\delta_{1 - 2 \\nu}(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n2 \\delta_{1 - 2 \\nu}(t) - 2 u_0 \\delta(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation*}\n\nWe now examine the right BC starting with $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$. From \\eqref{right-BC} we see that\n$$\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = -u_0.\n$$\nBut \\eqref{conv-int}, \\eqref{R-basic} and the semigroup property for the Riemann-Liouville fractional integral lead to\n\\begin{align*}\n& \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = \\int_0^t (1 - u_0) R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) \\delta_{1 - 2 \\nu}(\\tau) \\, \\d \\tau \\\\\n& \\qquad = (1 - u_0) {}_{0}^{}D_{t}^{-(1 -2 \\nu)} R_{2 \\nu,\\nu}(\\eta(t),t) = (1 - u_0) {}_{0}^{}D_{t}^{-(1 -2 \\nu)} {}_{0}^{}D_{t}^{-2 \\nu} R_{0,\\nu}(\\eta(t),t) \\\\\n& \\qquad = (1 - u_0) {}_{0}^{}D_{t}^{-1} R_{0,\\nu}(\\eta(t),t) = (1 - u_0) R_{1,\\nu}(\\eta(t),t).\n\\end{align*}\nHence the right BC for the Caputo case becomes\n$$\nR_{1,\\nu}(\\eta(t),t) = -\\frac{u_0}{1 - u_0}.\n$$\nNow let $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$. Eq.~\\eqref{right-BC} in this case is\n$$\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = -u_0 \\delta_{2 \\nu}(t).\n$$\nWe have from \\eqref{conv-int}, \\eqref{R-basic} and the semigroup property for the Riemann-Liouville fractional integral that\n\\begin{align*}\n& \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t),t - \\tau) [2 \\delta_{1 - 2 \\nu}(\\tau) - 2 u_0 \\delta(\\tau)] \\, \\d \\tau \\\\\n& \\qquad = {}_{0}^{}D_{t}^{-(1 - 2 \\nu)} R_{2 \\nu,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t) \\\\\n& \\qquad = {}_{0}^{}D_{t}^{-(1 -2 \\nu)} {}_{0}^{}D_{t}^{-2 \\nu} R_{0,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t) \\\\\n& \\qquad = {}_{0}^{}D_{t}^{-1} R_{0,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t) = R_{1,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t).\n\\end{align*}\nTherefore the right BC for the Riemann-Liouville case becomes\n$$\nR_{1,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t) = -u_0 \\delta_{2 \\nu}(t).\n$$\nIn summary, the right BC~\\eqref{right-BC} is equivalent to\n\\begin{equation}\n\\label{free-prob-1-right-BC}\n\\begin{cases}\nR_{1,\\nu}(\\eta(t),t) = -\\frac{u_0}{1 - u_0} & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\nR_{1,\\nu}(\\eta(t),t) - u_0 R_{2 \\nu,\\nu}(\\eta(t),t) = -u_0 \\delta_{2 \\nu}(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation}\n\nFinally, we consider the `fractional Stefan condition'. 
Observe in \\eqref{u-sol-1-der} and \\eqref{u-sol-2-der} that\\n$$\\n-\\int_{-\\infty}^{x} \\frac{u_0}{2} R_{\\mu,\\nu}(x - \\xi,t) \\, \\d \\xi + \\int_x^{\\infty} \\frac{u_0}{2} R_{\\mu,\\nu}(\\xi - x,t) \\, \\d \\xi = 0\\n$$\\nfor any $\\mu \\ge 0$. If $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$, then similar arguments as above give\\n\\begin{align*}\\n\\frac{\\partial u}{\\partial x}(x,t) \\n& = -\\int_0^t \\frac{1}{2} R_{\\nu,\\nu}(x,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = -\\int_0^t (1 - u_0) R_{\\nu,\\nu}(x,t - \\tau) \\delta_{1 - 2 \\nu}(\\tau) \\, \\d \\tau \\\\\\n& = -(1 - u_0) {}_{0}^{}D_{t}^{-(1 -2 \\nu)} R_{\\nu,\\nu}(x,t) = -(1 - u_0) {}_{0}^{}D_{t}^{-(1 -2 \\nu)} {}_{0}^{}D_{t}^{-\\nu} R_{0,\\nu}(x,t) \\\\\\n& = -(1 - u_0) {}_{0}^{}D_{t}^{-(1 - \\nu)} R_{0,\\nu}(x,t) = -(1 - u_0) R_{1 - \\nu,\\nu}(x,t).\\n\\end{align*}\\nOn the other hand, if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$, then\\n\\begin{align*}\\n\\frac{\\partial u}{\\partial x}(x,t) \\n& = -\\int_0^t \\frac{1}{2} R_{\\nu,\\nu}(x,t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = - \\int_0^t R_{\\nu,\\nu}(x,t - \\tau) [\\delta_{1 - 2 \\nu}(\\tau) - u_0 \\delta(\\tau)] \\, \\d \\tau \\\\\\n& = -{}_{0}^{}D_{t}^{-(1 -2 \\nu)} R_{\\nu,\\nu}(x,t) + u_0 R_{\\nu,\\nu}(x,t) = -{}_{0}^{}D_{t}^{-(1 -2 \\nu)} {}_{0}^{}D_{t}^{-\\nu} R_{0,\\nu}(x,t) + u_0 R_{\\nu,\\nu}(x,t) \\\\\\n& = -{}_{0}^{}D_{t}^{-(1 - \\nu)} R_{0,\\nu}(x,t) + u_0 R_{\\nu,\\nu}(x,t) = -R_{1 - \\nu,\\nu}(x,t) + u_0 R_{\\nu,\\nu}(x,t).\\n\\end{align*}\\nSummarising, the `fractional Stefan condition' becomes\\n\\begin{equation}\\n\\label{free-prob-1-stefan}\\n-r \\D{}{}{}{2 \\nu} \\eta(t) = \\n\\begin{cases}\\n-(1 - u_0) R_{1 - \\nu,\\nu}(\\eta(t),t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\\n-R_{1 - \\nu,\\nu}(\\eta(t),t) + u_0 R_{\\nu,\\nu}(\\eta(t),t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\\n\\end{cases}\\n\\end{equation}\\n\\nIt remains to determine $\\eta(t)$. Looking at the series representation in Proposition~\\ref{R-series} and the known similarity solution of the classical diffusion equation when $\\nu = \\frac{1}{2}$, we propose the ansatz~$\\eta(t) = 2 \\alpha t^\\nu$ for some constant~$\\alpha$ to be determined. Then\\n$$\\n\\D{C}{0}{t}{2 \\nu} \\eta(t) = \\D{}{0}{t}{2 \\nu} \\eta(t) = \\frac{2 \\alpha \\Gamma(1 + \\nu) t^{-\\nu}}{\\Gamma(1 - \\nu)}\\n$$\\nand\\n$$\\nR_{\\mu,\\nu}(\\eta(t),t) = t^{\\mu - 1} W(-2 \\alpha;-\\nu,\\mu)\\n$$\\nfor any $\\mu \\ge 0$. If $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$ in the right BC~\\eqref{free-prob-1-right-BC}, then\\n\\begin{equation}\\n\\label{free-prob-1-trans-1}\\nW(-2 \\alpha;-\\nu,1) = -\\frac{u_0}{1 - u_0},\\n\\end{equation}\\nwhich is a transcendental equation involving $\\alpha$ and $u_0$. However, if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$ in the right BC~\\eqref{free-prob-1-right-BC}, then\\n$$\\nW(-2 \\alpha;-\\nu,1)- u_0 t^{2 \\nu - 1} W(-2 \\alpha;-\\nu,2 \\nu) = -\\frac{u_0 t^{2 \\nu - 1}}{\\Gamma(2 \\nu)},\\n$$\\nwhich cannot hold for all $t > 0$ unless $\\nu = \\frac{1}{2}$, since only then do the powers of $t$ on both sides match. Hence we immediately conclude, without needing to verify the corresponding `fractional Stefan condition' in \\eqref{free-prob-1-stefan}, that the ansatz~$\\eta(t) = 2 \\alpha t^\\nu$ will not work when $0 < \\nu < \\frac{1}{2}$ for the Riemann-Liouville case. 
Taking $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$ in the `fractional Stefan condition'~\\eqref{free-prob-1-stefan}, we obtain\n$$\n-\\frac{2 \\alpha r \\Gamma(1 + \\nu) t^{-\\nu}}{\\Gamma(1 - \\nu)} = -(1 - u_0) t^{-\\nu} W(-2 \\alpha;-\\nu,1 - \\nu) \n$$\nor\n\\begin{equation}\n\\label{free-prob-1-trans-2}\n\\frac{2 \\alpha r \\Gamma(1 + \\nu)}{(1 - u_0) \\Gamma(1 - \\nu)} = W(-2 \\alpha;-\\nu,1 - \\nu),\n\\end{equation}\nanother transcendental equation involving $\\alpha$ and $u_0$. From \\eqref{free-prob-1-trans-1} we can solve\n$$\nu_0 = -\\frac{W(-2 \\alpha;-\\nu,1)}{1 - W(-2 \\alpha;-\\nu,1)}, \\quad 1 - u_0 = \\frac{1}{1 - W(-2 \\alpha;-\\nu,1)}.\n$$\nSubstituting these into \\eqref{free-prob-1-trans-2}, we get a transcendental equation only for $\\alpha$, namely\n\\begin{equation}\n\\label{free-prob-1-trans-3}\n\\frac{2 \\alpha r \\Gamma(1 + \\nu)}{\\Gamma(1 - \\nu)} [1 - W(-2 \\alpha;-\\nu,1)] = W(-2 \\alpha;-\\nu,1 - \\nu).\n\\end{equation}\nTherefore \n\\begin{equation*}\n\\begin{split}\nu(x,t) & = u_0 + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(x,t - \\tau) 2 (1 - u_0) \\delta_{1 - 2 \\nu}(\\tau) \\, \\d \\tau = u_0 + (1 - u_0) \\D{}{0}{t}{-(1 - 2 \\nu)} R_{2 \\nu,\\nu}(x,t) \\\\\n& = u_0 + (1 - u_0) \\D{}{0}{t}{-(1 - 2 \\nu)} \\D{}{0}{t}{-2 \\nu} R_{0,\\nu}(x,t) = u_0 + (1 - u_0) \\D{}{0}{t}{-1} R_{0,\\nu}(x,t) \\\\\n& = u_0 + (1 - u_0) R_{1,\\nu}(x,t) = \\frac{R_{1,\\nu}(x,t) - W(-2 \\alpha;-\\nu,1)}{1 - W(-2 \\alpha;-\\nu,1)}\n\\end{split}\n\\end{equation*}\nand the analytical solution of the moving boundary problem for the Caputo case is\n\\begin{equation}\n\\label{free-prob-1-sol}\nu(x,t) = \\frac{R_{1,\\nu}(x,t) - W(-2 \\alpha;-\\nu,1)}{1 - W(-2 \\alpha;-\\nu,1)}, \\quad \\eta(t) = 2 \\alpha t^\\nu,\n\\end{equation}\nwhere $\\alpha$ satisfies the transcendental equation~\\eqref{free-prob-1-trans-3}.\n\n\\begin{rem}\nWhen $\\nu = \\frac{1}{2}$, \\eqref{W-special} yields\n$$\nW\\Big(-2 \\alpha;-\\frac{1}{2},\\frac{1}{2}\\Big) = \\frac{\\mathrm e^{-\\alpha^2}}{\\sqrt{\\pi}}, \\quad W\\Big(-2\\alpha;-\\frac{1}{2},1\\Big) = 1 - \\erf(\\alpha),\n$$\nwhile \\eqref{R-special-cases} gives \n$$\nR_{1,\\frac{1}{2}}(x,t) = \\erfc\\Big(\\frac{x}{2 \\sqrt{t}}\\Big) = 1 - \\erf\\Big(\\frac{x}{2 \\sqrt{t}}\\Big).\n$$ \nTherefore \\eqref{free-prob-1-sol} simplifies to\n$$\nu(x,t) = \\frac{\\erfc(\\frac{x}{2 \\sqrt{t}}) - 1 + \\erf(\\alpha)}{\\erf(\\alpha)} = 1 - \\frac{\\erf(\\frac{x}{2 \\sqrt{t}})}{\\erf(\\alpha)}, \\quad \\eta(t) = 2 \\alpha \\sqrt{t},\n$$\nwhere $\\alpha$ satisfies the transcendental equation\n$$\nr \\sqrt{\\pi} \\alpha \\erf(\\alpha) \\mathrm e^{\\alpha^2} = 1.\n$$\nThis is of course the well-known Neumann solution of the given Stefan problem for the heat equation~\\citep{Cr84,Hi87} typically obtained through a similarity analysis.\n\\end{rem}\n\\end{ex}\n\n\\begin{ex}\nConsider the moving boundary problem\n\\begin{equation}\n\\label{free-prob-2}\n\\left\\{\n\\begin{split}\n& \\D{}{}{}{2 \\nu} u = \\frac{\\partial^2 u}{\\partial x^2}, \\quad \\eta(t) < x < \\infty, \\quad t > 0, \\\\\n& u(x,0) = -1, \\quad 0 \\le x < \\infty, \\\\\n& u(\\eta(t),t) = 0, \\quad u(\\infty,t) = -1, \\quad t > 0, \\\\\n& \\D{}{}{}{2 \\nu} \\eta(t) = \\frac{1}{r} \\Big[1 + \\frac{\\partial u}{\\partial x}(\\eta(t),t)\\Big], \\quad t > 0,\n\\end{split}\n\\right.\n\\end{equation}\nwhere $r$ is a positive constant and $\\eta(t)$ is the moving boundary. Again, we wish to find $u(x,t)$ and $\\eta(t)$. 
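\\n\\nBefore analysing \\eqref{free-prob-2}, we briefly record how the constant $\\alpha$ in \\eqref{free-prob-1-sol} can be computed in practice from the transcendental equation~\\eqref{free-prob-1-trans-3}. The following sketch (in Python) is purely illustrative: the truncation of the Wright series and the bracketing interval for the root finder are ad hoc choices and not part of the analysis.\\n\\begin{verbatim}\\nfrom math import gamma\\nfrom scipy.optimize import brentq\\nfrom scipy.special import rgamma   # reciprocal gamma; zero at the poles of Gamma\\n\\ndef wright(z, lam, mu, kmax=200):\\n    # truncated series W(z; lam, mu) = sum_{k>=0} z**k / (k! Gamma(lam*k + mu));\\n    # adequate for moderate |z| only (illustrative truncation level)\\n    total, term = 0.0, 1.0   # term holds z**k / k!\\n    for k in range(kmax):\\n        total += term * rgamma(lam * k + mu)\\n        term *= z / (k + 1)\\n    return total\\n\\ndef alpha_caputo(r, nu):\\n    # root of (2*a*r*Gamma(1+nu)/Gamma(1-nu))*(1 - W(-2a;-nu,1)) - W(-2a;-nu,1-nu) = 0\\n    def F(a):\\n        c = 2.0 * a * r * gamma(1.0 + nu) / gamma(1.0 - nu)\\n        return c * (1.0 - wright(-2.0 * a, -nu, 1.0)) - wright(-2.0 * a, -nu, 1.0 - nu)\\n    return brentq(F, 1e-8, 10.0)   # bracket chosen heuristically\\n\\end{verbatim}\\nFor $\\nu = \\frac{1}{2}$ the computed root should satisfy the classical Neumann condition $r \\sqrt{\\pi} \\alpha \\erf(\\alpha) \\mathrm e^{\\alpha^2} = 1$ quoted in the remark above, which provides a convenient check.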
\n\nWhen $\\nu = \\frac{1}{2}$, \\eqref{free-prob-2} reduces to a Stefan problem involving a single-phase, semi-infinite, subcooled material. One application is the determination of whether ice melts or water freezes when hot water is thrown over cold ice~\\citep{Hu89}. The mathematical formulation for the heat equation is a Stefan problem with a constant heat source term in the condition at the boundary~\\citep{KiRi00}. Furthermore, a related industrial process is ablation, i.e.~a mass is removed from an object by vapourisation or similar erosive processes~\\citep{MiMy08,Mi12,MiMy12}. As in the previous example, the same physical interpretation when $0 < \\nu < \\frac{1}{2}$ is not necessarily valid so that our interest here is theoretical. We also refer to the last equation in \\eqref{free-prob-2} as a `fractional Stefan condition'.\n\nComparing \\eqref{free-prob-2} with \\eqref{gen-IBVP}, we identify $\\eta^-(t) = \\eta(t)$, $\\eta^+(t) = \\infty$, $\\kappa = 1$, $a = 1$, $b = 0$, $c = 1$, $d = 0$, $g^-(t) = 0$, $g^+(t) = -1$ and $f(x) = -1$. The last equation in \\eqref{free-prob-1} provides a condition for the moving boundary~$\\eta(t)$. Take $f_\\mathrm{ext}(x) = -1$ for all $x \\in \\mathbb R$ for example.\n\nNote that $R_{\\mu,\\nu}(\\infty,t) = \\lim_{a \\rightarrow \\infty} \\L^{-1}\\{s^{-\\mu} \\mathrm e^{-a s^\\nu};t\\} = 0$. Using \\eqref{u-sol-1} and \\eqref{u-sol-2}, we have\n\\begin{equation}\n\\label{free-prob-2-u}\nu(x,t) =\n\\begin{cases}\n-1 + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(x - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n-\\delta_{2 \\nu}(t) + \\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(x - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation}\nEqs.~\\eqref{h-minus-1}, \\eqref{h-minus-2}, \\eqref{h-plus-1} and \\eqref{h-plus-2} yield\n\\begin{equation*}\nh^-(t) = \n\\begin{cases}\n1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n\\delta_{2 \\nu}(t) & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$},\n\\end{cases} \\quad \nh^+(t) = \n\\begin{cases}\n-1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n-1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation*}\n\nFrom \\eqref{left-BC} we deduce that the left BC is\n\\begin{equation}\n\\label{free-prob-2-phi}\n\\begin{cases}\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t) - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = 1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n\\int_0^t \\frac{1}{2} R_{2 \\nu,\\nu}(\\eta(t) - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau = \\delta_{2 \\nu}(t)& \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$},\n\\end{cases}\n\\end{equation}\nwhile \\eqref{right-BC} gives the right BC\n\\begin{equation*}\n\\begin{cases}\n\\int_0^t \\frac{1}{2} \\delta_{2 \\nu}(t - \\tau) \\varphi^+(\\tau) \\, \\d \\tau = -1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\n\\int_0^t \\frac{1}{2} \\delta_{2 \\nu}(t - \\tau) \\varphi^+(\\tau) \\, \\d \\tau = -1 & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\n\\end{cases}\n\\end{equation*}\nObserve that we used Proposition~\\ref{R-a-zero}, and both Caputo and Riemann-Liouville cases have the same right BC because they also have the same $h^+(t)$. In fact, the right BC can be expressed as $\\D{}{0}{t}{-2 \\nu} \\varphi^+(t) = -2$ using \\eqref{conv-int}. 
If $\\Phi^+(s) = \\L\\{\\varphi^+(t);s\\}$, then \\n$$\\n\\Phi^+(s) = -\\frac{2}{s^{1 - 2 \\nu}};\\n$$ \\nthus $\\varphi^+(t) = -2 \\delta_{1 - 2 \\nu}(t)$.\\n\\nTo use the `fractional Stefan condition', we first calculate\\n\\begin{align*}\\n\\frac{\\partial u}{\\partial x}(x,t) & = -\\int_0^t \\frac{1}{2} R_{\\nu,\\nu}(x - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau,\\n\\end{align*}\\nwhich implies that\\n\\begin{equation}\\n\\label{free-prob-2-stefan}\\n\\begin{cases}\\nr \\D{C}{0}{t}{2 \\nu} \\eta(t) = 1 - \\int_0^t \\frac{1}{2} R_{\\nu,\\nu}(\\eta(t) - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{C}{0}{t}{2 \\nu}$}, \\\\\\nr \\D{}{0}{t}{2 \\nu} \\eta(t) = 1 - \\int_0^t \\frac{1}{2} R_{\\nu,\\nu}(\\eta(t) - \\eta(\\tau),t - \\tau) \\varphi^-(\\tau) \\, \\d \\tau & \\text{if $\\D{}{}{}{2 \\nu} = \\D{}{0}{t}{2 \\nu}$}.\\n\\end{cases}\\n\\end{equation}\\n\\nHence the solution of the moving boundary problem is described by \\eqref{free-prob-2-u}, \\eqref{free-prob-2-phi} and \\eqref{free-prob-2-stefan}. It does not appear to be possible to solve for $\\varphi^-(t)$ and $\\eta(t)$ explicitly (assuming that the solutions even exist) and therefore the integral equations have to be solved numerically. Note that although $\\varphi^+(t) = -2 \\delta_{1 - 2 \\nu}(t)$ has been determined for both Caputo and Riemann-Liouville cases, the expressions in \\eqref{free-prob-2-u}, \\eqref{free-prob-2-phi} and \\eqref{free-prob-2-stefan} do not actually depend on it explicitly.\\n\\end{ex}\\n\\n\\section{Concluding remarks}\\n\\nIn this article, we derived the solution of a general IBVP for the time-fractional diffusion equation using the embedding method. The formulation of the IBVP incorporates time-dependent BCs and allows the consideration of bounded and unbounded spatial domains. The solution of the IBVP generalises the results in \\citet{RoTh21} for the classical diffusion equation and in \\citet{Ro22b} for a particular class of IBVPs with Dirichlet BCs for the time-fractional diffusion equation. We then used the solution of the IBVP to solve two representative examples of moving boundary problems for the time-fractional diffusion equation. In particular, the solution of the first problem is a `fractional' generalisation of the well-known Neumann solution for a Stefan problem for melting ice. \\n\\nThe embedding method gives rise to a system of integral equations for some time-dependent functions, which needs to be solved numerically in general. The numerical solution of IBVPs and moving boundary problems for the time-fractional diffusion equation is currently work in progress. However, the numerical solution of IBVPs for the classical diffusion equation has been done in \\citet{RoTh22}. The novelty here for IBVPs for the time-fractional diffusion equation is that the linear Volterra integral equations of the first kind for $\\varphi^\\pm(t)$ now involve $R_{\\mu,\\nu}(a,t)$. Hence it is necessary to be able to compute these numerically. As this auxiliary function satisfies certain fractional integral and differential equations, a necessary first step seems to be to solve these equations numerically (e.g.~using finite differences) for $R_{\\mu,\\nu}(a,t)$ and adapt the boundary element method for solving linear Volterra integral equations of the first kind proposed in \\cite{RoTh22} for the classical diffusion equation. 
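As a complementary route, the auxiliary function itself can be evaluated directly from the Wright-function representation implicit in the similarity calculation of the first example, namely $R_{\\mu,\\nu}(a,t) = t^{\\mu - 1} W(-a t^{-\\nu};-\\nu,\\mu)$ for $a \\ge 0$ and $t > 0$; a minimal sketch (in Python, reusing the truncated series evaluator for $W$ given earlier, and intended only as an illustration) is\\n\\begin{verbatim}\\ndef R(mu, nu, a, t):\\n    # R_{mu,nu}(a,t) = t**(mu-1) * W(-a * t**(-nu); -nu, mu), i.e. the inverse\\n    # Laplace transform of s**(-mu) * exp(-a * s**nu) evaluated at t > 0\\n    return t ** (mu - 1.0) * wright(-a * t ** (-nu), -nu, mu)\\n\\end{verbatim}\\nwhich could then be fed into a quadrature rule for the Volterra integral equations. 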
Other future directions are multilayer problems for the time-fractional diffusion equation and a further investigation of the properties and applications of the auxiliary function~$R_{\\mu,\\nu}(a,t)$. \\n\\n\\n\\n"} +{"id": "red-arxiv-4", "source_id": "red-arxiv_4_red-arxiv-4", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction}\\nToday, software systems have a significant role in various domains among which are healthcare, entertainment, transport and logistics, and many more. It is only natural that with this increasing dependency on software, the number of software systems increases. Additionally, these systems become more and more complex. All this leads to a rise in the number of software faults, also known as bugs. As a result, the ability to locate the source of a bug (e.g.\\ a file or a commit) is vital for the development and maintenance of efficient software solutions.\\n\\nBug localization refers to the automated process of discovering files that contain bugs, based on a bug report. This research project aims to provide a literature review of different techniques for bug localization. This study distinguishes itself from other surveys and literature reviews \\cite{wong2016survey} in one significant way. The focus of the work is on identifying, categorizing and analyzing existing bug localization methods and tools which were evaluated in an industrial setting. To the best of my knowledge, there are no other works that prioritise this aspect. Unfortunately, such literature is scarce; therefore, bug localization techniques evaluated on open source software are also included.\\n\\nThe structure of the study is as follows. Section \\ref{background} provides brief explanations of relevant concepts. Then, Section \\ref{overview} contains an overview of the investigated tools and techniques. A more detailed investigation of the various techniques and a recommendation of the most beneficial approach are presented in Section \\ref{discussion}. Finally, Section \\ref{conclusion} concludes this work.\\n\\n\\section{Background}\\n\\label{background}\\nThis section introduces the concepts of bug localization, open source and closed source, as well as some of the common metrics and datasets used in the investigated methods.\\n\\n\\subsection{Bug Localization}\\nBug localization, also referred to as fault localization, is the automated process of finding the source of a given software bug based on a bug report. There is a variety of bug localization methods using different strategies and information (i.e.\\ data) to locate the origin of a bug. Among the most studied and well-known types are information retrieval based bug localization and spectrum analysis based bug localization.\\n\\n\\subsection{Open source and Closed source}\\nOpen source software projects are publicly available for utilization and modification. Such projects are used in empirical evaluations in the majority of the scientific literature because they are easily accessible. Conversely, closed source projects are proprietary and commonly absent from academic research. Because this study is conducted for an industrial partner, the most relevant tools and techniques are the ones tested in industrial settings and/or on closed source projects. Moreover, the industrial partner is interested in solutions applicable to source code written in C and/or C++, while most open source bug localization datasets contain Java code. 
Unfortunately, due to their unavailability, studies using closed source projects for evaluations are scarce. Therefore, this study also includes tools evaluated on open source projects.\\n\\n\\subsection{Information retrieval (IR)}\\nInformation retrieval refers to the process of searching for relevant information and/or metadata in a single document or a collection of documents. Web search engines can be considered an example of IR. A web search is a query for specific information over the web (a collection of documents), and the most relevant websites are returned as the top results.\\n\\nThere are many bug localization tools and techniques which are based on IR and share the following generalized strategy. They consider a bug report as a query and a collection of source code files (i.e.\\ the project) as a collection of documents. In an attempt to find the source of a bug, the tools find textual overlap between the query (bug report) and the documents (source code files). The source code file(s) with the biggest overlap are reported in a ranked list, starting with the most relevant one. \\n\\n\\subsection{Program spectrum}\\n\\label{spectrumb:based}\\nA program spectrum refers to some specific execution information of a program. For example, this could be information about the execution of conditional statements. A program spectrum can be utilized for tracking program behaviour. \\n\\nSpectrum based bug localization is a heuristic involving three elements: a test coverage matrix (1), a hit-spectrum (2) and a fault locator (3). These elements are explained below.\\n\\n1: All spectrum based bug localization approaches use test coverage information. They collect information about which source code elements are covered by which tests in a test coverage matrix. The columns of the matrix contain the test cases and the rows contain the elements being tested. Each cell in the matrix contains a binary indicator of whether a code element is covered by a test case (value equals one) or not (value equals zero).\\n\\n2: The hit spectrum is calculated for every element (row) included in the test coverage matrix. The hit spectrum is a tuple with four values $(e_{f},e_{p},n_{f},n_{p})$. In this tuple, $e_{f}$ represents the number of failing test cases that execute the element, $e_{p}$ the number of passing ones. The number of failing and passing test cases that do not cover the element are represented by $n_{f}$ and $n_{p}$ respectively.\\n\\n3: Finally, an equation known as a fault locator computes the suspiciousness of all tested elements. Sorting all elements by decreasing suspiciousness produces a ranking with the most likely culprit behind a bug at the top of the ranking. The intuition behind spectrum based bug localization is that when a certain element gets executed more often by failing test cases and less often by passing test cases, it is assigned a higher suspiciousness. 
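To make these ingredients concrete, the following minimal sketch (in Python) computes hit spectra from a coverage matrix and ranks elements with the Ochiai formula, one widely used fault locator; the data layout and names are illustrative assumptions rather than part of any particular surveyed tool.\\n\\begin{verbatim}\\nimport numpy as np\\n\\ndef ochiai_ranking(coverage, failed):\\n    # coverage: one row per tested element, one column per test case (binary)\\n    # failed:   boolean vector marking the failing test cases\\n    cov = np.asarray(coverage, dtype=bool)\\n    failed = np.asarray(failed, dtype=bool)\\n    ef = (cov & failed).sum(axis=1)    # failing tests that cover the element\\n    ep = (cov & ~failed).sum(axis=1)   # passing tests that cover the element\\n    nf = failed.sum() - ef             # failing tests that miss the element\\n    denom = np.sqrt((ef + nf) * (ef + ep))\\n    susp = np.divide(ef, denom, out=np.zeros(len(denom)), where=denom > 0)\\n    return np.argsort(-susp)           # element indices, most suspicious first\\n\\end{verbatim}\\n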
Different spectrum based techniques propose their own fault locators; however, there exist some which are widely cited and used \\cite{Tarantula,Dstar, ochiai}.\\n\\n\\subsection{Evaluation metrics}\\n\\label{evaluation:metrics}\\n\\textbf{EXAM score:} This score represents the percentage of statements that must be examined before reaching the first statement containing the bug.\\n\\n$$ \\mathrm{EXAM} = \\frac{|\\textit{\\text{ statements examined }}|}{|\\textit{\\text{ all statements in the program }}|}\\cdot 100$$\\n\\textbf{Mean Average Precision (MAP):} This is a widely used IR evaluation metric which considers all buggy files and their corresponding ranks. It is calculated by the following formula:\\n\\n$$ \\mathrm{MAP} = \\frac{1}{|Q|}\\sum_{q=1}^{|Q|} \\mathrm{AP}_q$$\\n\\nwhere $|Q|$ is the total number of queries and $\\mathrm{AP}_q$ is the average precision of query $q$, so $\\mathrm{MAP}$ is the mean of the $\\mathrm{AP}$ over all queries. $AP$ is defined by\\n\\n$$ \\mathrm{AP} = \\sum_{k=1}^{M} \\frac{\\mathrm{P}(k) \\cdot \\mathrm{pos(k)}}{\\text{\\textit{number of positive instances}}}$$\\n\\nwhere $M$ is the number of retrieved source files, $P(k)$ is the precision at a cut-off rank $k$ and $pos(k)$ is a binary value representing whether or not the file at rank $k$ is buggy. $AP$ results in a high value when the buggy files are sorted correctly at the top of the list and results in a low value when they are scattered throughout the ranking.\\n\\n\\textbf{Mean Reciprocal Rank (MRR):} If the source of the bug is at the top in the ranked list of results, its reciprocal rank equals one; if it is the tenth in the list, its reciprocal rank equals $1/10$. Therefore smaller values indicate that the source of a bug is ranked low. Larger values indicate better performance. MRR is the average reciprocal rank over all queries.\\n\\n$$ \\mathrm{MRR} = \\frac{1}{|Q|}\\sum_{i=1}^{|Q|} \\frac{1}{\\mathrm{rank}_{i}}$$\\n\\n\\textbf{Recall at Top N:} This metric reports how many bugs have at least one of their buggy files (i.e.\\ an origin of the bug) present in the top N (N=1,5,10) results that a tool returns. Like MRR, this metric focuses on early precision. The popularity of the metric is based on the assumption that if at least one of the files causing a bug is found early, it may be easier for developers to find the other files.\\n\\n\\subsection{Evaluation datasets}\\n\\label{evaluation:datasets}\\n\\textbf{Defects4j:} Just et al.\\ \\cite{Defects4j} present a dataset containing 357 bugs (initially). These bugs originate from open source projects written in Java. Additionally, the dataset includes a framework with an interface that facilitates conducting and reproducing research. Because the bugs in this dataset are ``real-world'' bugs (i.e.\\ they have not been engineered on purpose) and the interface is easy to use, Defects4j is widely used in studies on bug localization. Some of the Defects4j projects referenced in this study are Apache Commons Lang, Apache Commons Math, JFreeChart, Joda-Time and Google Closure Compiler.\\n\\n\\textbf{iBugs:} Zimmermann et al.\\ \\cite{iBugs} present the iBugs dataset. This dataset precedes the other two described in this section. It contains a few projects, among which the most notable is AspectJ, which is commonly used for evaluating bug localization tools. \\n\\n\\textbf{Dataset by Ye et al.\\ \\cite{ye2014learning}:} In their study, Ye et al.\\ \\cite{ye2014learning} create a benchmark dataset containing six open source projects. Specifically, AspectJ, Birt, Eclipse, JDT, SWT and Tomcat. 
These projects are used in the original evaluations of some of the more recent works investigated in this study.\n\n\\section{Overview of tools and techniques}\n\\label{overview}\nTable \\ref{table:1} presents all the methods and tools investigated in this work as well as some important aspects such as their types, what do they localize to (e.g.\\ files, statements, commits, etc.), etc. Table \\ref{table:2} contains the number of bugs referenced in the original papers that present each of the tools. Interestingly, the numbers vary greatly from a few hundred to tens of thousands. It is likely that this inconsistency results from the difference between the types of approaches. For example, tools and techniques which are Machine Learning and Deep Learning based require a large amount of training data to optimize their performance. The study presenting DNNLoc \\cite{DNNLoc}, an approach that uses Deep Learning uses a dataset with 22 747 bugs. For other approaches such as Information Retrieval based ones, this is not needed. Therefore, the number of bugs used during their evaluations is a lot less. Finally, one of the tools, PRFL \\cite{PRFL} is evaluated on artificial bugs created through mutation. As mutations are essentially variations of code, they can be easily generated, resulting in a big number of artificial bugs. Nevertheless, there is a trend visible in the ``Type of Bug(s) used for evaluation'' column in Table \\ref{table:1}, that researchers investigating bug localization are adopting a preference for real-world data (bugs).\n\\newpage\n\\newgeometry{left=0.5cm,bottom=0.5cm}\n\\begin{landscape}\n\\begin{table}[hbt]\n\\caption{Tools and techniques considered in this study}\n\\label{table:1}\n\\begin{adjustbox}{width=1.52\\textwidth}\n\\begin{tabular}{|l|l|l|l|l|l|l|l|l|}\n\\hline\n\\multicolumn{1}{|c|}{\\textbf{Technique/Tool}} &\n \\multicolumn{1}{c|}{\\textbf{Type}} &\n \\multicolumn{1}{c|}{\\textbf{\\begin{tabular}[c]{@{}c@{}}Localize\\\\ To\\end{tabular}}} &\n \\multicolumn{1}{c|}{\\textbf{\\begin{tabular}[c]{@{}c@{}}Implementation \\\\ Availability\\end{tabular}}} &\n \\multicolumn{1}{c|}{\\textbf{\\begin{tabular}[c]{@{}c@{}}Type of Bug(s) \\\\ used for evaluation\\end{tabular}}} &\n \\multicolumn{1}{c|}{\\textbf{\\begin{tabular}[c]{@{}c@{}}Evaluation \\\\ metrics\\end{tabular}}} &\n \\multicolumn{1}{c|}{\\textbf{Evaluation dataset}} &\n \\multicolumn{1}{c|}{\\textbf{Advantages}} &\n \\multicolumn{1}{c|}{\\textbf{Disadvantages}} \\\\ \\hline\nBLUiR \\cite{BLUiR} (2013) &\n IR &\n file &\n unavailable &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- MAP\\\\ - MRR\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from four open source projects \\\\ (AspectJ, Eclipse, JDT, ZXing)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Considers structure of source code (classes, methods, \\\\ variables, comments) to increase performance (structured \\\\ information retrieval)\\\\ - Can utilize bug similarity data to increase performance\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Relies on programming constructs from object-oriented\\\\ languages\\\\ - Has a runtime overhead\\\\ - Performance relies on good naming conventions and well\\\\ written bug reports.\\end{tabular} \\\\ \\hline\nBug2Commit \\cite{B2C} (2021) &\n \\begin{tabular}[c]{@{}l@{}}IR,\\\\ Machine\\\\ Learning\\end{tabular} &\n commit &\n unavailable &\n \\cellcolor[HTML]{FFFFFF}{\\color[HTML]{000000} \\begin{tabular}[c]{@{}l@{}}- client-side crashes \\\\ (mobile app)\\\\ - serverside 
performance \\\\ regressions\\\\ - mobile simulation tests \\\\ for performance\\end{tabular}} &\n \\begin{tabular}[c]{@{}l@{}}- MRR\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}400 crashes\\\\ 40 perf. regressions\\\\ 550 regression tests\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Can handle complex bug reports containing \\\\ various features (e.g.\\ summary, stack trace, etc.)\\\\ - Can handle synonyms using weighted word \\\\ embedding\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Result is dependent on overlap in relevant words \\\\ between a bug report and bug source artefact.\\\\ - Not possible to weigh words from \\\\ different features differently.\\\\ - Uses corpus to weigh words, but not domain \\\\ knowledge.\\\\ - Mispredictions may potentially waste time and \\\\ resources.\\end{tabular} \\\\ \\hline\nCBT \\cite{CBT} (2012) &\n \\begin{tabular}[c]{@{}l@{}}Statistical\\\\ Debugging\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}executable \\\\ statement\\end{tabular} &\n unavailable &\n \\begin{tabular}[c]{@{}l@{}}unspecified,\\\\ (some artificially injected \\\\ bugs)\\end{tabular} &\n - EXAM Score &\n \\begin{tabular}[c]{@{}l@{}}22 programs (both C and Java)\\\\ (The Siemens suite, the Unix suite,\\\\ space, grep, gzip, make and Ant\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Only requires coverage information\\\\ - Outperforms the compared (then state-of-the-art) Tarantula \\\\ technique\\\\ - Evaluated on a large set of projects of different sizes and \\\\ programming languages\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Evaluated on a single metric\\\\ - Some of the bugs in the projects are artificial and were\\\\ seeded by the researchers.\\end{tabular} \\\\ \\hline\n\\begin{tabular}[c]{@{}l@{}}Convolutional Neural Network\\\\ \\cite {CNN} (2019)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Deep\\\\ Learning,\\\\ IR\\end{tabular} &\n file &\n \\begin{tabular}[c]{@{}l@{}}description for\\\\ re-implementation\\\\ available\\end{tabular} &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- AUC\\\\ - MAP\\\\ - MRR\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from five open source projects\\\\ (AspectJ, Eclipse, JDT, SWT, Tomcat)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Outperforms IR based bug localization techniques.\\\\ - Is able to address issues related to language semantics\\\\ (e.g.\\ synonymy)\\\\ - Efficient prediction time\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- High training time.\\\\ - Requires exessive of hardware resources - GPUs and\\\\ memory.\\\\ - Models are difficult to use by mainstream software\\\\ practitioners\\end{tabular} \\\\ \\hline\nCooBa \\cite{CooBa} (2021) &\n \\begin{tabular}[c]{@{}l@{}}Adversarial\\\\ Transfer\\\\ Learning\\end{tabular} &\n file &\n unavailable &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- MAP\\\\ - Recall at Top 10\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from four open source projects\\\\ (AspectJ, Eclipse, JDT, SWT)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Leverages the existance of projects with rich historical bug data\\\\ - Negates the transfer of private information of one project to another\\\\ (which impacts performance)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Tool evaluated only on source and target projects \\\\ written in the same programming language\\\\ - Sizes of projects and their effects on performance are \\\\ not documented.\\end{tabular} \\\\ \\hline\nD\\&C \\cite {D&C} (2019) 
&\n\\begin{tabular}[c]{@{}l@{}}IR,\\\\ Machine\\\\ Learning\\end{tabular} &\n file &\n available &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- MAP\\\\ - MRR\\\\ - Recall at Top N\\end{tabular} &\n Bench4BL &\n \\begin{tabular}[c]{@{}l@{}}- Significantly outperforms all state-of-the art IR tools on MAP and\\\\ MRR\\\\ - Adaptively calculates the most effective weights applied to the\\\\ similarity scores of IR features of a given pair of bug report and \\\\ source code file\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Requires extensive hardware resources\\\\ - Requires a way to split the used dataset for multi-classification\\end{tabular} \\\\ \\hline\nDNNLoc \\cite {DNNLoc} (2017) &\n \\begin{tabular}[c]{@{}l@{}}Deep \\\\ Learning,\\\\ IR\\end{tabular} &\n file &\n unavailable &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- MAP\\\\ - MRR\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from six open source projects\\\\ (AspecJ, Birt, Eclipse, JDT, SWT, \\\\ Tomcat\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Reasonable prediction time.\\\\ - Is able to link bug reports and source code that do not contain \\\\ similar wording\\\\ - Higher accuracy than other state-of-the-art\\\\ machine-learning approaches\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- High training time\\\\ - Requires extensive hardware resources\\end{tabular} \\\\ \\hline\n Legion \\cite{Legion} (2021) &\n \\begin{tabular}[c]{@{}l@{}}IR,\\\\ Machine\\\\ Learning\\end{tabular} &\n file &\n unavailable &\n \\begin{tabular}[c]{@{}l@{}}consumer-reported \\\\ issues\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Developer \\\\ expectations\\\\ - MAP\\\\ - MRR\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Seven of the most functionally\\\\ important repositories in \\\\ Adobe Analytics\\\\ product.\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Can utilize bug similarity data to increase performance.\\\\ - Is parameterised to increase performance.\\\\ - Correctly identifies a faulty file in the Top 10 recommendations\\\\ at least 70 \\% of the time during evaluation. (junior developer\\\\ expectation)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Training of the model may be computationally \\\\ expensive for big repositories.\\\\ - Is not able to correctly identify a faulty file in \\\\ the top 5 recommendations at least 80\\% of the time\\\\ during evaluation. 
(senior developer expectation)\\end{tabular} \\\\ \\hline\n \\begin{tabular}[c]{@{}l@{}}Patterned Spectrum Analysis\\\\ \\cite{ItemSetMining} (2016)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Patterned\\\\ Spectrum \\\\ Analysis\\end{tabular} &\n method &\n unavailable &\n real-world bugs &\n \\begin{tabular}[c]{@{}l@{}}- Wasted effort\\\\ - Recall at Top 10\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from five open source projects\\\\ (Apache Commons Lang, Apache\\\\ Commons Math, JFreeChart,\\\\ Joda-Time, Google Closure \\\\ Compiler)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Technique is highly relevant to bugs that rarely occur, but have a \\\\ significant impact such as those in integration tests\\\\ - Outperforms state-of-the-art raw spectrum analysis bug localization\\\\ techniques (in wasted effort)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Unable to rank methods which do not contain method \\\\ calls.\\\\ - Less than half of all bugs ranked in Top 10\\end{tabular} \\\\ \\hline\n\\begin{tabular}[c]{@{}l@{}}PredFL \\cite{PredFL} (2019)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Spectrum\\\\ Analysis, \\\\ Statistical \\\\ Debugging\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}executable \\\\ statement\\end{tabular} \n &\n available\n &\n real-world bugs\n &\n \\begin{tabular}[c]{@{}l@{}}- EXAM Score\\\\ - Recall at Top N\\end{tabular}\n &\n \\begin{tabular}[c]{@{}l@{}}Bugs from five open source projects\\\\ (Apache Commons Lang, Apache\\\\ Commons Math, JFreeChart,\\\\ Joda-Time, Google Closure \\\\ Compiler)\\end{tabular}\n &\n \\begin{tabular}[c]{@{}l@{}}- Combines two types of bug localization\\\\ - Can be used to further improve the performance of an existing \\\\ bug localization technique\\end{tabular}\n &\n \\begin{tabular}[c]{@{}l@{}}- Is evaluated as a complementary approach to\\\\ another bug localization technique (i.e. not standalone)\\\\ - Implementation works only on Java, as it relies on Java\\\\ Development Tools (JDT)\\\\ - Statistical Debugging requires a way to seed predicates\\\\ into the program\\end{tabular}\n \\\\ \\hline\n PRFL \\cite{PRFL} (2017) &\n \\begin{tabular}[c]{@{}l@{}}Spectrum\\\\ Analysis \\end{tabular} &\n method &\n available &\n \\begin{tabular}[c]{@{}l@{}}real world bugs,\\\\ artificial bugs\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Absolute\\\\ wasted effort\\\\ - Recall at Top N\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}Bugs from five open source projects\\\\ (Apache Commons Lang, Apache\\\\ Commons Math, JFreeChart,\\\\ Joda-Time, Google Closure \\\\ Compiler)\\end{tabular} &\n \\begin{tabular}[c]{@{}l@{}}- Boosts the performance of existing Spectrum based \\\\ techniques\\\\ - Added overhead is insignificant \\\\ (order of seconds)\\end{tabular} & \\begin{tabular}[c]{@{}l@{}}- Limited performance on multi-faults\\\\ - Not investigated whether performance\\\\ increase is sufficient in practice\\end{tabular} \\\\ \\hline\n\\end{tabular}\n\\end{adjustbox}\n\\end{table}\n\\end{landscape}\n\\pagebreak\n\\restoregeometry\n\n\n\\begin{table}[h]\n\\caption{Number of bugs used during evaluation of the tool/technique}\n\\label{table:2}\n\\centering\n\\begin{tabular}{|l|c|}\n\\hline\n\\multicolumn{1}{|c|}{\\textbf{Technique/Tool}} & \\textbf{Nr of bugs} \\\\ \\hline\nBLUiR \\cite{BLUiR} & 3 479 \\\\ \\hline\n\\begin{tabular}[c]{@{}l@{}}Bug2Commit\\\\ \\cite{B2C}\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}400 (crashes),\\\\ 40 (perf. 
regressions),\\\\ 550 (regression tests)\\end{tabular} \\\\ \\hline\\nCBT \\cite{CBT} & 420 \\\\ \\hline\\n\\begin{tabular}[c]{@{}l@{}}Convolutional \\\\ Neural Network\\\\ \\cite{CNN}\\end{tabular} & 17 331 \\\\ \\hline\\nCooBa \\cite{CooBa} & 17 513 \\\\ \\hline\\nD\\&C \\cite {D&C} & 5 321 \\\\ \\hline\\nDNNLoc \\cite{DNNLoc} & 22 747 \\\\ \\hline\\nLegion \\cite{Legion} & 933 \\\\ \\hline\\n\\begin{tabular}[c]{@{}l@{}}Patterned \\\\ Spectrum Analysis \\\\ \\cite{ItemSetMining}\\end{tabular} & 357 \\\\ \\hline\\nPredFL \\cite{PredFL} & 357 \\\\ \\hline\\nPRFL \\cite{PRFL} & \\begin{tabular}[c]{@{}l@{}}357 (real world bugs)\\\\ 30 692 (artificial bugs)\\end{tabular} \\\\ \\hline\\n\\end{tabular}\\n\\end{table}\\n\\n\\section{Discussion}\\n\\label{discussion}\\nThis section contains more detailed information about the tools and techniques presented in Table \\ref{table:1} in Section \\ref{overview}. Each approach is introduced, followed by a brief overview of its methodology. However, as some of the approaches are based on very specific knowledge (e.g.\\ deep learning) and for the sake of brevity, the methodology is only briefly reported, with many details omitted. For the full details please refer to the original papers. In this section the terms ``technique'' and ``method'' are used interchangeably.\\n\\n\\subsection{BLUiR \\cite{BLUiR}}\\nSaha et al.\\ \\cite{BLUiR} observe that although IR is widely used for bug localization, many of the tools and techniques consider the source of information (bug reports, source code) as texts without any structure. They argue that the structure of source code (methods, comments, variables, etc.) and that of bug reports (title, description, etc.) can be taken advantage of to boost the performance of IR based bug localization. For example, if there exists a class named ``C'' whose file also has four variables containing ``C'', the class name itself carries no special weight and this file could easily be overlooked in favour of another file with more than five occurrences of the term. However, the relevance of the file whose class is named ``C'' should be higher than that of files merely containing the term in some variable names. Therefore, they propose BLUiR \\cite{BLUiR}, an approach that considers both the structure of queries (i.e.\\ bug reports) and documents (i.e.\\ source code) to enhance the performance of IR in bug localization. Although Saha et al.\\ \\cite{BLUiR} do not share their implementation publicly, they use the open source Indri toolkit \\cite{indri} for their retrieval model and explain in detail how their implementation works. Therefore, I believe it can be reproduced. Additionally, they do provide a link to the dataset they use for their experiments. Unfortunately, the link does not work.\\n\\n\\subsubsection{Methodology}\\nBLUiR's process begins with building an abstract syntax tree (AST) of every source code file. This tree is traversed to collect names of methods, classes, variables and comments. All of these are tokenized and passed to Indri for stopword removal, stemming and indexing. Two improvements over previous methods are also proposed. First, unlike other IR techniques, programming keywords are not pruned. Saha et al.\\ \\cite{BLUiR} argue that words such as ``String'' might occur in some names and pruning them will decrease recall. Instead, by using the extraction of identifiers from the AST, programming keywords are excluded while keeping the ones contained in identifiers. Subsequently, identifiers are split into tokens using techniques such as camel case splitting. 
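As a purely illustrative example of such splitting (unrelated to the actual Indri-based pipeline of the authors), a minimal camel-case tokenizer might look like\\n\\begin{verbatim}\\nimport re\\n\\ndef camel_case_split(identifier):\\n    # e.g. 'parseXMLDocument' -> ['parse', 'XML', 'Document']\\n    return re.findall(r'[A-Z]+(?=[A-Z][a-z])|[A-Z]?[a-z]+|[A-Z]+|[0-9]+', identifier)\\n\\end{verbatim}\\n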
However, the full identifiers are kept and indexed as well. This is done as exact identifiers may be present in bug reports. In particular, a bug report may be written by knowledgeable developers or there could be a stack trace attached to the bug report. Saha et al.\\ \\cite{BLUiR} note that this small change results in a considerable improvement. To account for the structure of source code and bug reports, Saha et al.\\ \\cite{BLUiR} distinguish four source code fields (class, method, variable and comments) and two bug report fields (summary and description). They perform a search in each of the eight combinations (bug report field, source code field) and then sum the scores of all combinations. The benefit of this is that when a term appears in multiple fields in a file, such as class name and methods, greater importance is implicitly assigned to the file because of the summation of scores.\\n\\n\\subsubsection{Evaluation}\\nIn order to evaluate the performance of BLUiR, Saha et al.\\ \\cite{BLUiR} use the same dataset and evaluation metrics used for the then state-of-the-art BugLocator \\cite{BL}. The dataset consists of four open source projects (Eclipse, SWT, AspectJ, ZXing). The evaluation metrics for the experiments are Recall at Top N, MRR and MAP (cf.\\ Section \\ref{evaluation:metrics}). Saha et al.\\ \\cite{BLUiR} conduct several comparisons between their approach and BugLocator, as BugLocator outperformed many other techniques at the time and could utilize bug similarity data to further boost its performance, as is explained next.\\n\\nInitially, the results of BLUiR are compared with those of BugLocator without the use of bug similarity data. It is observed that on three out of four projects, BLUiR outperforms BugLocator by a large margin on all metrics. Subsequently, the results of BLUiR are compared against the improved results of BugLocator utilizing bug similarity data. Even in this case, BLUiR performs better. The authors decide to modify BLUiR to be able to use bug similarity data as well. However, this does not boost performance as much as it did for BugLocator \\cite{BL}. Saha et al.\\ \\cite{BLUiR} argue that this is because BLUiR is able to compensate for the lack of bug similarity data. Nevertheless, BLUiR can take advantage of such data whenever it is available.\\n\\n\\subsubsection{Advantages and Disadvantages}\\nThe biggest benefit of BLUiR is that it takes into consideration the structure of source code and bug reports and does not treat them as simple text. Nevertheless, this makes the approach dependent on the use of good naming conventions and well-written bug reports. In a context in which both of these are missing, the performance may significantly decline. Moreover, the approach as described by Saha et al.\\ \\cite{BLUiR} uses object-oriented (OO) programming constructs, such as class names, for the aforementioned fields. Therefore, this approach is language dependent. Although the experiments in the study are done on Java code, the authors note that BLUiR should be easily adaptable to other OO languages. \\n\\nFinally, as structured information retrieval requires more computations, there is a runtime overhead. This overhead depends on the size of the source code collection and it may vary between 3x and 12x. 
Even so, the full execution time is in seconds, meaning that the overhead may be negligible.\\n\\n\\subsection{Bug2Commit \\cite{B2C}}\\nMurali et al.\\ \\cite{B2C} propose an information retrieval (IR) based tool which is meant to address practical concerns around industrially used IR based bug localization. The goal of the tool is to identify a bug introducing commit, based on a bug report and a list of candidate commits (gathered using time or build information from the bug report). The tool was proposed and evaluated at Facebook.\\n\\n\\subsubsection{Methodology}\\nThere are several requirements at Facebook that a bug localization technique must fulfil. Firstly, it must localize to a commit and not to a file. Secondly, the method has to be unsupervised. Although Facebook contains a lot of historical data on bug fixes, there is no data on bug causes. This would result in a lack of labelled training data. Thirdly, the method must be capable of processing complex queries and documents. Bug reports and code commits at Facebook contain several different components, referred to as features, which may vary. For example, a bug report may have various stack traces from different threads, metadata, an exception message, etc. Code commits also could have different features such as comments, summary, test plan, etc. Therefore, these entities cannot be simply thought of as individual collections of words. Finally, because Facebook utilizes a monolithic repository that receives thousands of commits daily containing code for several platforms, in numerous languages and conventions, the technique needs to handle word similarities and the idiosyncratic coding conventions.\\n\\nBased on these requirements, Murali et al.\\ \\cite{B2C} investigate several existing bug localization methods of which only two are aligned with the requirements - Orca \\cite{bhagwan2018orca} and Locus \\cite{wen2016locus}. These techniques are evaluated and deemed insufficient. Consequently, the researchers propose Bug2Commit, a tool using IR and semantic word embedding to address the aforementioned requirements.\\n\\n\\subsubsection{Evaluation}\\nThe performance of Bug2Commit is assessed on three Facebook datasets - one containing mobile app crashes, one containing server performance regressions and one containing mobile simulation tests. Recall at Top 1, 5 and 10 and MRR are used as evaluation metrics (cf.\\ Section \\ref{evaluation:metrics}). When compared against Orca \\cite{bhagwan2018orca} and Locus \\cite{wen2016locus}, Bug2Commit performs better or at least as well.\\n\\nWhen analysing the three cases individually, Murali et al.\\ \\cite{B2C} observe that the tool is beneficial for server-side performance regressions and mobile app crashes. In the former, the tool reduces the time spent on localizing from hours (time without the use of the tool) to minutes. In the latter, for a period of 6 months, Bug2Commit successfully localizes four out of seven bugs. Among the three unsuccessful cases, two contain corrupted data and one is mispredicted. Finally, the mobile simulation tests differ from the other two cases as the consumer of the localization is a system rather than a human. In this case, it is observed that synonymous words are used within the query and candidates. Like the previous techniques (i.e.\\ Orca and Locus), Bug2Commit is unable to localize efficiently. Thus, the researchers use Bug2Commit with the weighted word embedding model. 
This version of the tool is able to capture word semantics well.\\n\\n\\subsubsection{Advantages and Disadvantages}\\nOne benefit of Bug2Commit is that it can be applied in scenarios in which the sources of information are complex entities. Both bug reports and the code commits may contain multiple components. Additionally, due to the weighted word embedding, the tool is able to grasp the semantics of words.\\n\\nNevertheless, word overlap between the origin of a bug (i.e.\\ a commit) and the bug report is needed for Bug2Commit to work correctly. Applied in cases where there is insufficient overlap, the performance of the tool may decrease drastically. Another disadvantage is the inability to prioritize words from a feature (e.g.\\ bug report summary, bug report title, etc.). In some scenarios, bug titles may contain more relevant information than other features. However, only word frequency is used in calculating the word weight, while domain knowledge is not. Finally, mispredictions may lead to resource and time waste; therefore, caution needs to be applied.\\n\\n \\subsection{CBT \\cite{CBT}}\\nCrosstab based technique (CBT) \\cite{CBT} is a statistical technique that utilizes code coverage information on the level of executable statements as well as the outcome of all test cases (i.e.\\ whether a test passed or failed) to localize bugs. The name of the technique comes from the use of a table depicting the relationship between two or more variables, referred to as a crosstab. Such a table is computed for every (executable) statement and a statistic is used to calculate the statement's suspiciousness. The main difference between CBT and heuristic based techniques (such as some of the spectrum based techniques) is the use of a well-defined statistical analysis, which is not present in heuristic based techniques.\\n \\n\\subsubsection{Methodology}\\nAs previously stated, crosstabs are used for investigating the relationship between two or more variables. Wong et al.\\ \\cite{CBT} construct crosstabs for each executable statement. Each crosstab contains two column-wise variables \\textit{covered} and \\textit{not covered} and two row-wise variables \\textit{successful} and \\textit{failed}. For every crosstab, a hypothesis test is made in order to check for a dependence relationship between the coverage of a statement and the result of the program execution. The chi-square test is used when deciding whether to accept or reject the hypothesis that the coverage of the statement and the execution result are independent. Wong et al.\\ \\cite{CBT} note that the degree of association between the coverage of a statement and the execution result is more interesting than the (in)dependence relationship itself. To that end, several fractions such as the number of failed tests over the number of all tests help in determining whether the coverage is associated more with the failed or the successful execution result. Using all the gathered information, statements are then separated into five classes. Four of these classes contain statements with a (high or low) degree of association between their coverage and the (failed or successful) execution result. The fifth class contains statements with independent coverage and execution result. The statements from the first class (i.e.\\ high association degree between coverage and failed execution result) are presumably the most likely to contain bugs.\\n\\n\\subsubsection{Evaluation}\\nWong et al.\\ \\cite{CBT} perform an extensive evaluation by investigating several aspects. 
The main evaluation metric used throughout the experiments is the \\textit{EXAM score} (cf.\\ Section \\ref{evaluation:metrics}).\\n\\nFirstly, the performance of CBT is evaluated against that of Tarantula \\cite{Tarantula}, a well-known spectrum based technique. The performances of both these approaches are recorded on both large and small projects (i.e.\\ in terms of \\textit{lines of code} (LOC)). The small projects (i.e.\\ less than 1000 LOC) used are a part of the Siemens and Unix suites which have been used in many fault localization studies (before 2014). Both suites consist of multiple programs, all written in C. Because both CBT and Tarantula output ranked lists in which several statements could have the same suspiciousness value, Wong et al.\\ \\cite{CBT} consider two cases per approach. The first case is referred to as the best case, in which a statement containing a bug is examined before statements that do not contain a bug and have the same rank. The second case is the exact opposite. Therefore it is referred to as the worst case. In experiments done on the Siemens dataset, not only is CBT's best case better than that of Tarantula, but its worst case is also better than Tarantula's worst. The two techniques are compared on larger projects as well, namely the space program, grep, gzip, and make. However, the size of the projects does not impact the results of the comparison. CBT seems to outperform Tarantula, except for two test cases.\\n\\nSecondly, CBT is evaluated against two other statistical approaches, namely Sober \\cite{liu2005sober} and Liblit05 \\cite{Liblit}. This evaluation contains both a qualitative and a quantitative aspect. In the quantitative evaluation, CBT always produces better results than Liblit05 and generally outperforms Sober.\\n\\nIn addition, Wong et al.\\ \\cite{CBT} also investigate the effectiveness of CBT in another language - Java, as all previously mentioned programs are in C. For that, a comparison between CBT and Tarantula is performed on the software project Apache Ant. In this comparison, the difference between the performance of the two approaches is significant. Specifically, the worst performance of CBT is better than the best of Tarantula.\\n\\n\\subsubsection{Advantages and Disadvantages}\\nEven though the evaluation done by Wong et al.\\ \\cite{CBT} presents CBT as a very efficient technique, there are several considerations to be made. Firstly, Wong et al.\\ \\cite{CBT} utilize mutation based fault injection to generate artificial bugs. Therefore, not all bugs used for the experiments can be considered real. Secondly, although CBT significantly outperforms Tarantula, at the time of writing this study there are many newer works on spectrum based bug localization proposing methods that perform better than Tarantula.\\n\\n\\subsection{Convolutional Neural Network \\cite{CNN}}\\nDeep Learning (DL) \\cite{deng2014deep} has been employed in a variety of fields and has gained popularity due to its improved performance over that of general Machine Learning (ML) approaches in areas such as image processing \\cite{krizhevsky2012imagenet}, speech recognition \\cite{hinton2012deep} and others. Deep learning based models can be used for bug localization as well, as linking bug reports and source code can be transformed into a classification problem. Moreover, deep learning can address one of the big challenges of bug localization - the lexical gap between bug reports and source code. 
More specifically, the use of synonymous words in bug reports and source code files. Polisetty et al.\\ \\cite{CNN} investigate the benefits for developers of applying deep learning based models for bug localization by evaluating the performance of a Convolutional Neural Network (CNN) and Simple Logistic Model against that of several existing bug localization techniques. \\n\\nAs a motivation for the study, Polisetty et al.\\ \\cite{CNN} cite a survey \\cite{kochhar2016practitioners} in which 386 software practitioners across 5 continents provide their expectations of bug localization research. One of the insights from the survey is that even the tools and approaches with the best performance cannot satisfy 75\\% of the participants. Therefore, the authors explore whether state-of-the-art Deep Learning based models satisfy the expectations of practitioners and how these models perform in comparison to other bug localization techniques.\\n\\n\\subsubsection{Methodology}\\nTo evaluate the two models (CNN and Simple Logistic Model), the researchers extract the source code for each open source project and, using publicly available bug reports and bug-commit mappings, label each file as \\textit{buggy} or \\textit{non-buggy} based on whether the file is changed in a bug fixing commit. For each project, Polisetty et al.\\ \\cite{CNN} construct three datasets of source code: one containing all files in a project, one containing just buggy files and one containing very buggy files (i.e.\\ files linked to multiple bugs). This is done to investigate the effect of varying buggy files on performance. After extracting the data, it is pre-processed by removing numbers, punctuation symbols and programming-related keywords. Stemming and other pre-processing schemes used in related DL studies are applied as well. Then a traceability matrix is constructed, which is the Cartesian product of the list of all source files and bug reports. For every record in this matrix, the corpus of the bug report and that of the source code are merged. With this, the data is prepared for training and testing the models. For each dataset, 90\\% of the data is used for training and validation and 10\\% is used for testing.\\n\\n\\subsubsection{Evaluation}\\nPolisetty et al.\\ \\cite{CNN} evaluate the performance of multiple models on five open source bug localization datasets - AspectJ, Tomcat, SWT, Eclipse, JDT. Two of the models are implemented and evaluated directly and the others are assessed by means of publicly available statistics. The two implemented models are a CNN with an architecture proposed by Kim \\cite{kim2014convolutional} and a traditional ML technique - Simple Logistic model \\cite{landwehr2005logistic}.\\n\\nWhen comparing the performance of the CNN to that of the Simple Logistic Model, the researchers conclude that the former outperforms the latter in most cases, but has higher training time. Additionally, training Simple Logistic models is faster, but they require a large amount of memory (on the order of terabytes). Moreover, the computational resources needed for training both the DL and ML bug localization models are expensive and difficult to obtain.\\n \\nWhen comparing the CNN model to another CNN \\cite{huo2017enhancing}, the implementation of Polisetty et al.\\ \\cite{CNN} performs worse. 
However, they argue that this is because the other implementation is run on a reduced set of source files, which impacts performance and therefore can also be misleading to software practitioners. \n\nFinally, comparing the performance of the CNN to that of IR techniques, such as BugLocator \\cite{BL} and BLUiR \\cite{BLUiR}, the CNN outperforms the other techniques in every metric.\n\nIn addition, the researchers discuss the effect of varying buggy files on performance. They note that the CNN model performs considerably better on smaller datasets. At the same time, most DL based models consider only a subset of source code files when evaluating performance. Therefore, this overly-optimistic performance evaluation can be misleading to developers.\n \n\\subsubsection{Advantages and Disadvantages}\nThe CNN that Polisetty et al.\\ \\cite{CNN} evaluate outperforms the widely cited and best performing IR based techniques. This may be because the CNN addresses the challenge with which IR-techniques struggle. Namely, the use of synonymous words in source code and bug reports. Additionally, the model has an efficient prediction time (4 seconds per 1000 files). However, these benefits come with trade-offs. Firstly, the training time is high. While the model can handle projects with millions of lines of code, the training time would be on the order of months. An excessive amount of hardware resources is also needed (e.g.\\ GPUs and memory). Polisetty et al.\\ \\cite{CNN} further note that such a technique is difficult for mainstream software practitioners to use. \n\n\\subsection{CooBa \\cite{CooBa}}\nZhu et al.\\ \\cite{CooBa} observe that although there is a multitude of supervised machine learning based bug localization techniques, which show promising results, these techniques require rich historical data for training purposes. They argue that such data may not always be available, for example in the case of a newly developed project. There exist cross-project bug localization techniques which are based on the idea of transferring knowledge from a project rich in historical data (the source project) to a project lacking this data (the target project). However, these techniques fail to capture the uniqueness of a project. As a result, the performance of such methods is negatively impacted. Therefore, Zhu et al.\\ \\cite{CooBa} present an approach based on adversarial learning called Cross-project Bug Localization via Adversarial Transfer Learning (CooBa), which outperforms other state-of-the-art cross-project bug localization techniques.\n\n\\subsubsection{Methodology}\nInitially, a set of bug reports and a collection of source code files for the source project (i.e.\\ the project with historical bug data) and the same sets for the target project (i.e.\\ the project which will leverage this data) are collected. In addition, two indicator matrices are constructed per project. These matrices indicate whether a file is buggy or not w.r.t.\\ a bug report. Cross-project bug localization is formulated as a classification learning task. During the training of the framework, the goal is to learn prediction functions from input pairs (bug report, source code file) from both the source and target projects. After training the model, the prediction function for the target project is used to compute the relevance of each pair of a bug report and a source code file in the target project. There are three integral parts in the model - shared bug report processing, cooperative code file processing and relevance prediction. 
The cooperative code file processing contains four components, one of which is the public feature extraction. It is in this component that adversarial training is used to guarantee the effective learning of public features. The adversarial training methodology \\cite{goodfellow2014explaining} usually refers to the use of a generator and a discriminator which are considered adversaries. In CooBa there is a shared feature extractor that acts as a generator and a discriminator that tries to distinguish which project (source or target) a code file comes from.\n\n\\subsubsection{Evaluation}\nThe performance of CooBa is compared against that of both other cross-project bug localization methods and techniques that work within a single project, referred to by Zhu et al.\\ \\cite{CooBa} as \\textit{within-project} methods. Among these methods, two are investigated in this study. Namely, BugLocator \\cite{BL} and DNNLoc \\cite{DNNLoc}. The evaluation dataset consists of four open source projects - AspectJ, Eclipse, JDT, SWT. For the evaluation, one of the projects is used as a source project and another is used as a target project (e.g.\\ source: AspectJ, target: Eclipse). All combinations of such pairs are considered. The evaluation results in two main observations. First, \\textit{within-project} techniques trained on one (source) project do not perform well when directly used on another (target) project. This indicates the need for special cross-project bug localization techniques. Secondly, CooBa \\cite{CooBa} outperforms other cross-project techniques w.r.t.\\ both MAP and Recall at Top 10 (cf.\\ Section \\ref{evaluation:metrics}).\n\nIn addition, Zhu et al.\\ \\cite{CooBa} evaluate the benefits of the adversarial transfer learning. They do so by changing the model to CooBa*, which does not utilize adversarial transfer learning, and comparing its performance to that of CooBa on all project pair combinations using the MAP metric. CooBa outperforms CooBa*, which indicates that without the use of adversarial transfer learning, noise from one project is brought with the transfer of knowledge. This noise negatively impacts bug localization.\n\n\\subsubsection{Advantages and Disadvantages}\nThe ability to utilize the historical bug data of one project in order to perform bug localization on another is CooBa's biggest advantage. However, Zhu et al.\\ \\cite{CooBa} only consider the scenario in which both projects are written in the same programming language. In addition, the study lacks an analysis of how the size of the source project and the amount of historical bug data impact performance, and of whether there is an optimal size and/or amount of bug data.\n\n\\subsection{D\\&C \\cite{D&C}}\nKoyuncu et al.\\ \\cite{D&C}, who propose the Divide and Conquer method (D\\&C), motivate their study with the observations of a previous study by Lee et al.\\ \\cite{lee2018bench4bl}. In particular, the observation that although newer and better IR based bug localization tools and techniques are constantly being introduced, they (generally) do not get adopted by software practitioners. Primarily, this is due to the limited performance of the state-of-the-art methods and the absence of detailed validation on the importance of different IR features. Koyuncu et al.\\ \\cite{D&C} investigate six state-of-the-art IR based bug localization methods (BugLocator, BRTracer, BLUiR, AmaLgam, BLIA and Locus) and observe that a significant portion of bugs gets localized exclusively by each of the tools. 
They argue that different tools are more appropriate for specific sets of bug reports. In addition, they find connections between the performance of the tools and specific sets of IR features. This prompts Koyuncu et al.\\ \\cite{D&C} to develop an approach in which the weights of similarity scores between the features of the source code and those of the bug report are learned for different groups of bug reports. That is the Divide and Conquer method (D\\&C) \\cite{D&C}.\n\n\\subsubsection{Methodology}\nIn order to adaptively compute the most efficient weights for the similarity scores of IR features, Koyuncu et al.\\ \\cite{D&C} decide to use a supervised learning technique in which examples from the dataset are used to learn a classifier. Furthermore, a multi-classifier approach is chosen. This approach consists of building and training several classifiers, each one trained on a different part of the dataset. Instead of choosing one classifier at the end, the output of all of them is combined by taking the average of the prediction probabilities.\n\nIn their investigation of the state-of-the-art tools, Koyuncu et al.\\ \\cite{D&C} observe that there exist sets of bug reports that are only localized by a certain tool and that there is a set successfully localized by all the tools. This observation, together with the fact that the tools differ primarily in the IR features they consider, leads Koyuncu et al.\\ \\cite{D&C} to separate the dataset into regions based on the performance of the tools. This is done to provide relevant data for the various classifiers. For example, there is a region for each state-of-the-art tool containing the bug reports on which the tool performed best. Classifiers trained on these regions are referred to as region-specific classifiers. There are other regions as well, such as the one containing data on which all the tools performed well. Subsequently, for each region, the data is separated into sets for validation and training. However, the data for each region is very imbalanced as only a small part of the pairs of bug reports and source files are buggy. The classification algorithm LightGBM, which can account for data imbalances, is used to address the issue. \n\nEach classifier is trained for 10 000 iterations in combination with early stopping, an approach used to avoid overfitting. The training stops when there is no further improvement and the best model is found. Subsequently, the best model of each classifier is used on the relevant parts of the data to obtain the probability values for each combination of a bug report and source file. \n\nA combined output ranking is made by averaging the probabilities for source code files in each model.\n\n\\subsubsection{Evaluation}\nD\\&C is evaluated on the Bench4BL benchmark, which at the time contained data from 46 projects written in Java. Originally, the dataset contained 8 652 reports; however, for the evaluation of D\\&C it was cleaned of bug reports linked to missing files or ones assumed to be post-fix activities. In particular, these are bug reports having the same person as reporter and fixer.\nThe evaluation metrics are the widely used MAP, MRR and Recall at Top 1,5,10 (cf.\\ Section \\ref{evaluation:metrics}).\n\nInitially, D\\&C is compared against each of the studied state-of-the-art tools individually. From this comparison, Koyuncu et al.\\ \\cite{D&C} observe that D\\&C is able to localize more bugs than all state-of-the-art tools at Top 1, Top 5 and Top 10, with up to 13\\% more at Top 1. 
In addition, there is an improvement in MAP and MRR as well. Then, another comparison is made on the project level, meaning that the performance of the state-of-the-art tools is compared against that of D\\&C for each individual project in the dataset (having D\\&C trained on the entire dataset). In these experiments, the MAP and MRR values vary significantly, with some dropping very low. Nevertheless, D\\&C outperforms other tools for the majority of projects. Finally, the impact of the multi-classification is evaluated by analysing the performance of specific classifiers. There are several interesting observations. Firstly, D\\&C outperforms the region-specific classifiers and the classifier trained on bug reports which were localized at Top 1 by at least one tool. This supports the claim that D\\&C finds an efficient way to compute the most effective weights for the similarity scores of IR features. Secondly, the classifier trained on the data for which no tool was successful at Top 1 performs better than some of the region-specific classifiers. Koyuncu et al.\\ \\cite{D&C} take this as a confirmation of the need to divide the dataset.\n\n\\subsubsection{Advantages and Disadvantages}\nIn the study of Koyuncu et al.\\ \\cite{D&C}, D\\&C has been trained on heterogeneous data (i.e.\\ from different projects). However, the investigation of the performance after training on a single project is scarce. Koyuncu et al.\\ \\cite{D&C} do train D\\&C on the data from the biggest project in the dataset, but do not report specific statistics about its performance. Furthermore, the impact of the number of bug reports is not mentioned; in all of the experiments D\\&C is trained on the entire dataset. While D\\&C is able to outperform several widely cited IR based bug localization techniques, the lack of the above data needs to be considered. Finally, its practicality is also questionable. Koyuncu et al.\\ \\cite{D&C} split the dataset based on the performance of other state-of-the-art tools. This might not be optimal or feasible for software practitioners.\n\n\\subsection{DNNLoc \\cite{DNNLoc}}\nDNNLoc \\cite{DNNLoc} is a method designed to alleviate the already mentioned challenge for IR based bug localization - the use of synonymous language, referred to as lexical mismatch by Lam et al.\\ \\cite{DNNLoc}. It does so by using deep neural networks (DNNs) which learn to connect words from a bug report to different terms in source code files.\n\n\\subsubsection{Methodology}\nThe model is trained by creating two types of pairs between source code and bug reports. The first type is the positive pair and it is between a bug report and a file containing the bug cause. The second type, the negative pair, is between a bug report and files that are textually similar to the bug report but do not contain the bug cause. For each pair, several features are collected and feature vectors are constructed. Features are collected in the following way.\n\nThe bug reports are parsed and preprocessed using traditional IR techniques such as stopword removal and stemming. Identifiers mentioned in the bug report are not removed. Instead, they are split into words that are also stemmed. However, the full identifiers are kept as well. After the preprocessing, Term Frequency - Inverse Document Frequency, a traditional IR technique, is used to calculate the significance weights of the words. These weights are the features of the bug report.\n\nThe source code files contain four features of interest. 
The four features are identifiers, names of API classes and interfaces, comments and textual descriptions of API methods and classes. These are all extracted and processed similarly to the bug reports.\n\nThe textual similarity between a bug report and a source code file is also considered. It is calculated using the rVSM model, which is also utilized by Bug2Commit \\cite{B2C}. Lam et al.\\ \\cite{DNNLoc} note that the textual similarity and the relevancy feature (computed at a later point) complement each other and contribute to linking a bug report and source files, both when they contain similar words and when they do not.\n\nBug fixing and metadata features are collected as well. Among these is a score representing how recently a file has been fixed for a bug, and a score for the number of times a file has been fixed before a certain bug report. Lam et al.\\ \\cite{DNNLoc} argue that such metadata has been previously shown to improve performance.\n\nFinally, three DNN models are employed. The first one is an autoencoder which reduces the dimensions of the feature vectors. The benefit of doing so is twofold. On one hand, it reduces computational costs and makes the approach more scalable. On the other hand, redundant information is removed in the process. The output of the autoencoder is used as input to the second DNN - a relevancy estimator. This DNN produces a score indicating how relevant a source file is to a bug report. The last DNN takes all previously mentioned features and computes a single score which indicates how relevant a source file is to a bug report. Such scores are calculated for all files and ranked. \n\n\\subsubsection{Evaluation}\n\nLam et al.\\ \\cite{DNNLoc} conduct a series of experiments on a dataset provided in a previous study containing six open source projects. They use three widely employed metrics. Namely, Recall at Top N, MAP, and MRR (cf.\\ Section \\ref{evaluation:metrics}).\n\nInitially, the importance of the different features (e.g.\\ relevancy feature, metadata etc.) is tested. On its own, the DNN-computed relevancy is very inaccurate. However, combining it with the metadata feature significantly increases its accuracy. The textual similarity is more accurate than both previously mentioned components. However, combining it with the relevancy and metadata features improves its results by 2.5-8\\%. Furthermore, Lam et al.\\ \\cite{DNNLoc} note that in 18 cases neither the textual similarity nor the relevancy score ranks the correct file in the top 20; however, when combined (i.e.\\ in a component considering both) they do.\n\nSubsequently, the performance of the DNN is tested. Specifically, the accuracy with varying sizes of the DNN estimator (i.e.\\ number of nodes), the accuracy with different training data sizes and the ability to link terms in source files with words in bug reports are examined. Lam et al.\\ \\cite{DNNLoc} observe that the DNN is able to recognize connections between semantically connected words. For example, the DNN correctly identifies that the word \"context\" is relevant to the terms \"ctx\" and \"envCtx\".\n\nLam et al.\\ \\cite{DNNLoc} also compare the results of DNNLoc against those of several state-of-the-art bug localization techniques. Namely, one based on IR - BugLocator \\cite{BL}, one based on ML - Naive Bayes - and a hybrid one, a Learn to Rank method by Ye et al.\\ \\cite{ye2014learning}. In comparison to BugLocator, DNNLoc achieves a significant improvement in Top 1 and Top 5 accuracy. Lam et al.\\ \\cite{DNNLoc} argue that on one hand, the DNN model addresses the lexical gap challenge. 
On the other hand, the metadata features additionally enhance accuracy. DNNLoc outperforms the Naive Bayes and the Learn to Rank approach as well, due to some inherent weaknesses of those methods.\n\n\\subsubsection{Advantages and Disadvantages}\nDNNLoc shows that deep learning can indeed improve the results achievable with IR and can address the lack of word overlap between bug reports and source files. In addition, although rVSM is used in the study by Lam et al.\\ \\cite{DNNLoc}, these results may be improved even further by experimenting with another IR method. Unfortunately, as with other deep learning bug localization techniques, there is a computational cost. Lam et al.\\ \\cite{DNNLoc} do not explicitly mention the time efficiency of DNNLoc apart from stating that the training time is long. They do state that this could possibly be alleviated by parallel computing for the DNN; however, this is not investigated. The prediction time for a single bug report is within minutes.\n\n\\subsection{Legion \\cite{Legion}}\nLegion \\cite{Legion} is an extended implementation of BugLocator (BL), the well-known information retrieval (IR) based bug localization approach proposed by Zhou et al.\\ \\cite{BL}. Jarman et al.\\ \\cite{Legion}, who propose Legion, are initially interested in investigating the merit of BugLocator for developers in Adobe Analytics. Specifically, they are interested in whether the tool can adhere to developer expectations defined for two groups of developers. For the first group - developers new to a repository - the solution needs to correctly identify a faulty file in the top 10 recommendations at least 70\\% of the time. For the second group, consisting of developers familiar with a repository, the solution needs to correctly identify a faulty file in the top 5 recommendations at least 80\\% of the time.\n\n\\subsubsection{Methodology}\nAs Jira is used for issue tracking and Git for source control at Adobe Analytics, the following process is employed in preparation for the BugLocator evaluation. In every repository, all commit messages are scanned for a Jira issue id relating to a bug. Whenever one is found, information about it is collected from both Git and Jira, such as commit message and id, bug description, bug summary, reporter, etc. To avoid data-set biases reported by prior bug localization studies, the researchers consider customer-reported bugs because formal procedures are applied for their handling.\n \nThe performance of BugLocator is evaluated on 933 bugs, using the same evaluation metrics as those originally used by Zhou et al.\\ \\cite{BL} - MAP, MRR and Top 1,5,10. In comparison to the evaluation results in the original paper (i.e.\\ experiments on 4 open source projects), the results on the repositories at Adobe are worse. Because these results do not meet the developer expectations, the researchers decide to extend BugLocator with additional corpora and configurable parameters. BL+ is the result of this extension.\n\nBL+ has four configurable parameters, one for pre-processing (2 options) and three for the computation of a \\textit{rVSMScore} (63 options), a \\textit{SimiScore} (2 options) and a \\textit{FinalScore} (11 options). Together, these parameters and all their possible combinations result in 2,772 possible configurations. For each bug, it is observed that a different configuration may perform best, but without a way of knowing the optimal configuration a priori, the researchers decide to use all configurations. 
They do so by computing the final score for each configuration as well as the sum of scores, referred to as a \\textit{stacked score}. The stacked score and individual scores are then used as features, along with labels indicating whether a file is buggy, to learn a supervised model using Random Forest, which scores the features to optimize results. Legion is the name of this final solution containing BL+ and the supervised model.\n\n\\subsubsection{Evaluation}\nFor the evaluation, seven Adobe Analytics repositories of high importance are chosen. In terms of functionality, these repositories cover user interface components, microservices, backend collections, etc. Six of the seven repositories contain between 300 and 700 source code files, and one is considerably bigger, with the number of files ranging between 2500 and 5000.\n\nWhen comparing the three implementations (i.e.\\ BL, BL+ and Legion), BL+ boosts the results of BugLocator by 29.0\\%, 14.1\\% and 8.7\\% in terms of Top 1, 5, and 10. Legion improves the same scores by 143.4\\%, 58.1\\% and 36.4\\%, respectively. In terms of time efficiency, Legion takes 15.12 minutes on average for training and testing of the Random Forests for six repositories. For the seventh, largest repository, running all the 2,772 BL+ parameter configurations takes 34.8 minutes on average. This leads to the conclusion that the time efficiency of applying the bug localization technique to a bug report is highly dependent on the size of the corpora and the number of files in a repository. Furthermore, the researchers argue that Legion's time efficiency can be considered from two perspectives. The first perspective is the time it takes to train the model, which is of little concern for Adobe Analytics as training may be run as a background process periodically, while older versions of the model handle incoming bug reports. The second, and more important perspective, is the time it takes to run the trained model. At the time of the study, the process of getting a bug in the hands of the right developer took days, which provides ample time for Legion to run and construct a list of potential buggy files. The researchers conclude that BugLocator can be useful in an industrial setting and that with some augmentation it is able to fulfil one developer expectation, that of developers new to a repository.\n\n\\subsubsection{Advantages and Disadvantages}\nWhile Legion has the ability to leverage bug similarity data due to being an extension to BugLocator, its main benefit comes from having adjustable parameters. In combination with supervised learning, an optimal configuration of parameters can be chosen, providing the best results. Additionally, Jarman et al.\\ \\cite{Legion} note that at least 70\\% of the time the tool correctly identifies a faulty file in the Top 10 recommendations. Nevertheless, supervised learning is computationally expensive, especially for big repositories.\n\n\\subsection{Patterned Spectrum Analysis \\cite{ItemSetMining}} \nLaghari et al.\\ \\cite{ItemSetMining} present patterned spectrum analysis as a bug localization approach. They consider continuous integration as an important testbed for bug localization. That is why they motivate their study by describing several scenarios, based on multiple discussions with software engineers, of bugs occurring in a project that uses continuous integration. Laghari et al.\\ \\cite{ItemSetMining} note that integration tests provide a good context for bug localization research. 
Bugs in these tests occur rarely; however, when they do, both their complexity and impact are significant. Furthermore, finding such bugs can take hours and often has the highest priority, resulting in all other work being postponed until the bug has been resolved.\n\n\\subsubsection{Methodology}\nPatterned spectrum analysis begins by collecting traces for every test case. The trace contains data about the calls made to other methods whenever a test invokes a method from the base code. This data consists of a \\textit{caller object id} (object calling the method), \\textit{caller id} (method from which the call is made) and a \\textit{callee id} (called method). Subsequently, the trace is sliced into individual method traces for each executed method in the test case. The individual traces are processed by a closed itemset algorithm and turned into closed itemsets referred to as call patterns. A suspiciousness score is calculated and allocated to each call pattern by using a test coverage matrix and the call patterns of each method. This is done by using a fault locator (cf.\\ Section 2). Finally, the suspiciousness of an individual method is the maximum suspiciousness of its call patterns.\n\n\\subsubsection{Evaluation}\n\\label{Itemset:eval}\nLaghari et al.\\ \\cite{ItemSetMining} conduct experiments on 351 bugs from a subset of the dataset Defects4j (cf.\\ Section \\ref{evaluation:datasets}). The obtained results are compared against those of spectrum based bug localization (cf.\\ Section \\ref{spectrumb:based}), referred to by Laghari et al.\\ \\cite{ItemSetMining} as raw spectrum analysis. The raw spectrum analysis in the experiments uses the Ochiai fault locator \\cite{ochiai}. The subset used for evaluation consists of Apache Commons Math, Apache Commons Lang, Joda-Time, JFreeChart, and Google Closure Compiler. Although the Defects4j dataset does not differentiate projects in terms of unit and integration tests, Laghari et al.\\ \\cite{ItemSetMining} perform an experiment and provide circumstantial evidence that the tests in the Google Closure Compiler are close to integration tests. This is important because Laghari et al.\\ \\cite{ItemSetMining} assume that integration tests execute multiple methods from different classes. Therefore, the spectrum analysis will contain more traces. In the case of unit tests, the traces will be fewer.\n\nLaghari et al.\\ \\cite{ItemSetMining} use \\textit{wasted effort} as an evaluation metric, which represents the number of results (methods) in the ranked list that need to be examined before getting to the method causing the bug.\n\n$$\\text{\\textit{wasted effort}}=m+\\frac{n+1}{2}$$\n\nIn the equation, $m$ represents the number of methods without bugs ranked strictly higher than the bug causing method and $n$ represents the number of methods without bugs with a rank equal to that of the bug causing method.\n\nThe first observation is that patterned spectrum analysis results in less wasted effort than raw spectrum analysis. The strengths of the patterned spectrum analysis are especially visible when observing a certain bug in the Closure project, originating from a method with a unique call pattern in all failing test cases. This method is easily picked up by the patterned spectrum analysis, resulting in a wasted effort of $0.5$ (i.e.\\ the method is ranked highest in the result list). 
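To make the metric concrete, the following is a minimal illustrative sketch (not taken from Laghari et al.\\ \\cite{ItemSetMining}; the method names and scores below are made up): all $m$ higher-ranked methods must be examined, plus on average half of the tied ones.
\\begin{verbatim}
# Illustrative only: wasted effort from a map of suspiciousness scores.
def wasted_effort(scores, buggy_method):
    s = scores[buggy_method]
    m = sum(1 for meth, sc in scores.items() if sc > s)   # ranked strictly higher
    n = sum(1 for meth, sc in scores.items()
            if sc == s and meth != buggy_method)          # tied with the buggy method
    return m + (n + 1) / 2

# The buggy method alone holds the top score, so the wasted effort is 0.5.
print(wasted_effort({"parse": 0.9, "render": 0.4, "close": 0.1}, "parse"))
\\end{verbatim}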
The wasted effort for the same bug when using raw spectrum analysis is $183$.\n\nWhen evaluating how often the wasted effort of both patterned and raw spectrum analysis is $\\leq 10$ (i.e.\\ how many bugs are localized in the Top 10), Laghari et al.\\ \\cite{ItemSetMining} note that although the wasted effort of patterned spectrum analysis is lower than that of the raw one, a significant part of the bugs is not contained in the Top 10.\n\nThe final evaluation investigates the effect of the number of triggered methods on the wasted effort. Interestingly, Laghari et al.\\ \\cite{ItemSetMining} observe that the number of triggered methods has a significant impact on raw spectrum analysis and almost none on patterned spectrum analysis.\n\n\\subsubsection{Advantages and Disadvantages}\nIn terms of wasted effort, the proposed patterned spectrum analysis provides better results than those of the raw spectrum analysis on the 351 bugs from the dataset. In addition, the patterned spectrum analysis ranks more bug causes in the Top 10 ranked results. Furthermore, Laghari et al.\\ \\cite{ItemSetMining} hypothesise that patterned spectrum analysis performs significantly better than other spectrum based bug localization techniques when integration tests or tests of a similar structure are present, as in the case of the Google Closure Compiler. However, an inherent disadvantage of this approach is that methods that do not execute calls will always be placed at the bottom of the ranking. For example, this may be the case when a bug originates in a constructor.\n\n\\subsection{PredFL \\cite{PredFL}}\nThere exist two groups of approaches used for bug localization that are similar. In particular, spectrum based \\cite{ItemSetMining,PRFL} and statistical debugging based \\cite{Liblit} approaches. Spectrum based methods (cf.\\ Section \\ref{spectrumb:based}) collect information about the coverage of program elements and use a formula referred to as a fault locator to calculate how suspicious each element is of causing a bug. Statistical debugging approaches seed predicates into the program and collect information about their coverage and their values. Based on this information, the importance of each predicate is calculated. While both types of methods use coverage information, they have been researched independently. Research on spectrum based approaches is focused on developing different fault locators (i.e.\\ suspiciousness formulas), while work on statistical debugging is concentrated on different classes of predicates. Jiang et al.\\ \\cite{PredFL} propose Predicate-based Fault Localization (PredFL) \\cite{PredFL}, an approach that combines both groups.\n\n\\subsubsection{Methodology}\nTo combine both groups of approaches, Jiang et al.\\ \\cite{PredFL} consider Spectrum based fault localization (SBFL) as a Statistical debugging (SD) predicate. Because in Spectrum based localization the coverage of an element is important, this behaviour is easily translated to a predicate which evaluates to true when the element is covered. Moreover, SBFL values such as the number of successful/failed tests covering an element are mapped to the number of successful/failed executions in which a predicate is evaluated to true at least once. However, statistical debugging approaches and Spectrum based ones have different outputs. While the former results in a list of important predicates, the latter returns a list of suspicious elements. 
Therefore, Jiang et al.\\ \\cite{PredFL} decide that their unified model should return a list of suspicious elements as well. This is done under the assumption that there exists a higher-order function called the combining method which computes the suspiciousness of an element by aggregating the importance scores of predicates. The final model consists of four parameters: a seeding function, a risk evaluation formula, a granularity function and a combining method.\n\nJiang et al.\\ \\cite{PredFL} perform several experiments in order to analyze the importance of the four aforementioned parameters. As a result of these experiments, they propose PredFL, which is the unified model with a default configuration.\n\n\\subsubsection{Evaluation}\nBecause PredFL combines two groups of approaches and is therefore implicitly covered by existing techniques, Jiang et al.\\ \\cite{PredFL} are interested in whether the approach is complementary to state-of-the-art techniques. They investigate this by integrating PredFL into CombineFL \\cite{CombineFL}, a fault localization framework that consists of various types of techniques, such as spectrum based, information retrieval based, etc. All of these techniques are distributed in levels according to their execution time, from seconds (Level 1) to hours (Level 4). After integrating PredFL into CombineFL, their combined performance is evaluated on the Defects4j dataset (cf.\\ Section \\ref{evaluation:datasets}) using the Recall at Top 1,3,5,10 metric (cf.\\ Section \\ref{evaluation:metrics}). Note that for the experiments mentioned in the Methodology section the same dataset and evaluation metric were used, with the addition of the EXAM score (cf.\\ Section \\ref{evaluation:metrics}).\n\nJiang et al.\\ \\cite{PredFL} observe that after the integration of PredFL, CombineFL improves its recall at all Top levels (1,3,5,10) by 4-8\\%. Thus, they conclude that PredFL is complementary to existing techniques.\n\n\\subsubsection{Advantages and Disadvantages}\nPredFL successfully combines two types of approaches and, as shown by Jiang et al.\\ \\cite{PredFL}, can be used to improve the performance of another technique. However, there exist several concerns. Firstly, the technique is not evaluated on its own against the performance of state-of-the-art SBFL and SD approaches. Therefore, it is not sufficiently investigated whether the benefit of combining both types of techniques is significant or not. Secondly, the implementation made available by Jiang et al.\\ \\cite{PredFL} is built on the Java Development Tools (JDT), used for the generation of predicates. This makes the implementation language-specific. Furthermore, if the strategy of PredFL is to be implemented in another language, a way to generate and seed predicates into the program is needed. This may be difficult or not possible and could impact performance.\n\n\\subsection{PRFL \\cite{PRFL}}\nThere exist various bug localization techniques and tools based on Spectrum analysis. Zhang et al.\\ \\cite{PRFL} note that although there is a multitude of techniques, there is no single best performing one. They attribute this to the fact that all the methods focus on differentiating the programming entities (e.g.\\ methods, executable statements, etc.) which represent one aspect of program spectra. There is no work concentrating on the differentiation of tests, which represent another aspect of program spectra. 
Zhang et al.\\ \\cite{PRFL} explore the contribution of different tests to improve on the weaknesses of existing spectrum based techniques. They present PRFL \\cite{PRFL}, a method that improves Spectrum based bug localization by taking additional test information into consideration with the use of the PageRank algorithm \\cite{page1999pagerank}. After PageRank is used to recompute the spectrum information, existing spectrum based (fault locator) formulas can be utilized, resulting in better bug localization performance.\n\nPageRank \\cite{page1999pagerank} is an algorithm that improves the speed and quality of a search. In the context of a web search, PageRank considers the World Wide Web as a graph containing nodes (i.e.\\ web pages) that are linked. Intuitively, the algorithm gives higher importance to nodes that are linked to by important nodes and lower importance to those linked to by unimportant nodes. PageRank has been applied in different domains such as Biology, Chemistry, Neuroscience, Recommendation systems, Social networks and others.\n\n\\subsubsection{Methodology}\nPRFL consists of three main phases. The first phase is the preparation phase. This phase uses both static and dynamic analysis to obtain test coverage information and construct a call graph that maps the runtime connections of methods. This is followed by the second phase - the PageRank analysis. In the second phase, weighted spectrum information is generated for all the tests. In the final phase, referred to as the Ranking phase, fault locators (i.e.\\ Spectrum based equations) are applied on the weighted spectrum to rank methods in the source code. The weighted spectrum includes not only information from the test coverage, but also information from the call graph constructed in the first phase, as well as test scopes. Zhang et al.\\ \\cite{PRFL} argue that the weighted spectrum reflects more accurately whether a method is faulty or not, therefore improving the efficiency of spectrum based techniques.\n\n\\subsubsection{Evaluation}\nZhang et al.\\ \\cite{PRFL} use two datasets for the evaluation of PRFL. The first one contains real-world bugs and consists of five projects containing 357 bugs (JFreeChart, Closure Compiler, Apache Commons Lang, Apache Commons Math, Joda-Time) from the Defects4j dataset (cf.\\ Section \\ref{evaluation:datasets}). The second dataset consists of artificially created bugs. Zhang et al.\\ \\cite{PRFL} argue that the real-world bugs are somewhat limited. For that reason, they use the PIT mutation testing tool \\cite{PIT} to create mutation bugs using 87 of the most popular Java projects on Github. This results in 30 692 artificial bugs. The evaluation metrics used are AWE (absolute wasted effort), which is the same as the wasted effort metric described in Section \\ref{Itemset:eval}, and Recall at Top 1,3,5 (cf.\\ Section \\ref{evaluation:metrics}).\n\nInitially, PRFL is compared against state-of-the-art spectrum based techniques. This is done by recording the performance of each technique without the use of PRFL and then the performance of the technique combined with PRFL on all the projects from the Defects4j dataset. The versions of the techniques containing PRFL outperform their counterparts. Interestingly, PRFL not only improves AWE and the Recall at Top N, but it provides the biggest improvement when combined with the best performing techniques. 
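As a purely illustrative aside (this is not the PRFL implementation, and the tiny call graph below is made up), the PageRank computation used in the second phase can be sketched as a simple power iteration over a graph of test and base-code methods:
\\begin{verbatim}
# Illustrative power-iteration PageRank on a tiny, hypothetical call graph.
def pagerank(graph, damping=0.85, iterations=50):
    nodes = list(graph)
    rank = {v: 1.0 / len(nodes) for v in nodes}
    for _ in range(iterations):
        new = {v: (1.0 - damping) / len(nodes) for v in nodes}
        for v, targets in graph.items():
            share = targets if targets else nodes  # dangling nodes spread evenly
            for t in share:
                new[t] += damping * rank[v] / len(share)
        rank = new
    return rank

# Two tests exercising two base-code methods; m2 is reached by both tests.
graph = {"testA": ["m1", "m2"], "testB": ["m2"], "m1": [], "m2": []}
print(pagerank(graph))  # m2 ends up with the highest importance
\\end{verbatim}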
In addition, Zhang et al.\\ \\cite{PRFL} evaluate the overhead brought by the computation of the call graph and the PageRank analysis. They find that the overhead is insignificant (on the order of seconds), supporting the claim that PRFL is a lightweight technique.\n\nNext, Zhang et al.\\ \\cite{PRFL} study the impact of the number of bugs on PRFL. Defects4j contains both single-fault and multi-fault programs. In the single-fault case, a single bug is present in the program; in the multi-fault case, multiple bugs can be present. Zhang et al.\\ \\cite{PRFL} split the previous results into results on single-fault and results on multi-fault programs. They observe that state-of-the-art techniques perform inconsistently on the two sets. For example, the Op2 technique \\cite{naish2011model}, which outperforms all other techniques on single-faults, performs the worst on multi-faults. Although PRFL boosts the performance of all techniques in both cases, the improvements are larger on single-faults.\n\nFinally, PRFL is evaluated on artificial bugs. This is performed in a similar manner to the evaluation on real bugs. The evaluation shows that the type of bug (i.e.\\ real/artificial) does not impact the performance of PRFL.\n\n\\subsubsection{Advantages and Disadvantages}\nPRFL is advantageous because it boosts the performance of state-of-the-art spectrum based techniques while having an insignificant overhead. Nevertheless, as Zhang et al.\\ \\cite{PRFL} express in the introduction of their work, the performance of state-of-the-art spectrum based techniques is limited and there is no single best performing one. Taking this into consideration, Zhang et al.\\ \\cite{PRFL} do not establish whether the boost of PRFL is significant enough that the technique can be used in practice. Furthermore, the results of spectrum based techniques are worsened in the presence of multiple bugs (the multi-fault case). Unfortunately, in practice, this case may commonly occur.\n\n\\subsection{Recommendation}\nEvery tool and technique investigated in this study is unique. Not only do the types of approaches differ (e.g.\\ information retrieval and spectrum based), but methods of the same type are also distinct. For example, while both BugLocator \\cite{BL} and BLUiR \\cite{BLUiR} are information retrieval based, each handles the challenge of bug localization differently. Although each tool has different benefits and disadvantages and its performance might fluctuate in different contexts, I find Legion \\cite{Legion} to be the most beneficial among all considered tools and techniques. This subsection contains the reasoning behind this choice.\n\nFirstly, information retrieval is among the most widely used strategies for bug localization. A lot of research has been done, and continues to be done, to boost the performance of information retrieval techniques and tools for bug localization. Although IR based bug localization methods struggle with the use of synonymous words in bug reports and source code files, IR approaches can be combined with ML and DL techniques to alleviate this problem. In addition, as demonstrated by Legion, the inclusion of additional corpora can also improve the associations between bug reports and source code.\n\nLegion is an extension of a state-of-the-art IR based bug localization approach, built and evaluated in an industrial setting. While many tools and techniques attempt to use real-world bugs, Legion's performance is evaluated on closed source repositories used in industry. 
Furthermore, Legion is among the very few tools evaluated on an unconventional metric - the expectations of developers. The majority of other new studies do not consider such expectations. Researchers consider mainly the metrics employed by previous state-of-the-art techniques and attempt to outperform them. Even though this evaluation is important, it does not aid the adoption of bug localization techniques by software practitioners. Legion, however, displays performance sufficient for use by junior and less experienced developers.\n\nFinally, Jarman et al.\\ \\cite{Legion} make several proposals for future work that could improve Legion even further.\n\n\\section{Conclusion}\n\\label{conclusion}\nThis study investigated bug localization tools and techniques of different types. The focus of the work was on tools and techniques evaluated in an industrial setting. However, due to the lack of such methods, works evaluated on open source projects were also included. Each work was briefly introduced, followed by a description of its evaluation, as well as its advantages and disadvantages. Finally, a recommendation was presented in which Legion \\cite{Legion} was proposed as the most advantageous approach among all considered in this study for use in industry.\n\n\\section*{Acknowledgment}\nI am grateful to Dr Eleni Constantinou (Eindhoven University of Technology) and Dr Dennis Dams (ESI-TNO) for providing scientific guidance.\n\n\\bibliographystyle{ieeetr}\n"} +{"id": "red-arxiv-5", "source_id": "red-arxiv_5_red-arxiv-5", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction}\nIn this paper we continue our work that connects random groups with the first-order theory of nonabelian free groups (see \\cite{KhS}). We generalize our previous result, that a random group (of density $d<1/16$) satisfies with overwhelming probability a universal sentence in the language of groups if and only if the sentence is satisfied in a nonabelian free group, to $\\forall\\exists$-sentences. Our main result is \n\n\\begin{thmIntro}\nLet $\\sigma$ be a $\\forall\\exists$ first-order sentence in the language of groups. Let $0\\leq d<1/16$ be a real number. Then a random group of density $d$ satisfies, with overwhelming probability, the sentence $\\sigma$ if and only if a nonabelian free group satisfies $\\sigma$.\n\\end{thmIntro}\n\nWe will make heavy use of the machinery developed for answering Tarski's question and in particular {\\em formal solutions, towers, closures of towers} (see \\cite{MR1972179}, \\cite{MR2154989}) and the {\\em process of validating a $\\forall\\exists$-sentence} (see \\cite{Sela4}, \\cite{MR2293770}). \n\n\n\\section{Preliminaries}\n\n\\subsection{The density model}\nRecall Gromov's density model of randomness. \n\n\\begin{definition}[Gromov's Density Model]\\label{Density}\nLet $\\mathbb{F}_n:=\\langle e_1, \\ldots, e_n\\rangle$ be a free group of rank $n$. Let $S_{\\ell}$ be the set of reduced words on $e_1, \\ldots, e_n$ of length $\\ell$. \n\nLet $0\\leq d\\leq 1$. Then a random set of relators of density $d$ at length $\\ell$ is a subset of $S_{\\ell}$ that consists of $(2n-1)^{d\\ell}$-many elements picked randomly (uniformly and independently) among all elements of $S_{\\ell}$. \n\nA group $G:=\\langle e_1,\\ldots, e_n \\ | \\ \\mathcal{R} \\ \\rangle$ is called random of density $d$ at length $\\ell$ if $\\mathcal{R}$ is a random set of relators of density $d$ at length $\\ell$. 
\n\n\nA random group of density $d$ satisfies some property (of presentations) $P$ with overwhelming probability (w.o.p.), if the probability of occurrence of $P$ tends to $1$ as $\\ell$ goes to infinity. \n\\end{definition}\n\nWe note in passing that at density $0$, we, formally, have one relator of length $\\ell$, but the usual convention is that instead of one we have a finite fixed number of relators of length $\\leq \\ell$ (see \\cite[Remark 12]{MR2205306}). Hence the few-relator model (see \\cite[Definition 1]{MR2205306}) is a special case of the density model for $d=0$. \n\nHeuristically, one can understand this as follows: the ratio of groups with $(2n-1)^{d\\ell}$ relators all of length $\\ell$ that satisfy property $P$ over all such groups is a number, say $p$, that for ``interesting properties\" will depend on $\\ell$, i.e. $p:=p(\\ell)$. If $p(\\ell)$ goes to $1$ as $\\ell$ goes to $\\infty$, then we say that w.o.p. a random group has property $P$. \n\n\nWe will need the following results from \\cite{KhS}. \n\n\\begin{theorem}\\label{univ1} \\cite[Theorem 7.14]{KhS}\nLet $d<1/16$. Let $\\sigma$ be a universal sentence in the language of groups. Then $\\sigma$ is almost surely true in the random group of density $d$ if and only if it is true in a nonabelian free group.\n\\end{theorem}\n\nTheorem $\\ref{univ1}$ was obtained as a corollary of the following.\n\n\\begin{theorem}\\label{univ} \\cite[Proposition 7.13]{KhS}\nLet $d<1/16$. Let $V(\\bar{x})=1$ be a system of equations. Suppose $\\Gamma_\\ell$ is a random group of density $d$ at length $\\ell$ and $\\pi_\\ell:\\F_n\\rightarrow\\Gamma_\\ell$ the canonical quotient map. \n\nThen, every solution $V(\\bar b_\\ell)=1$ in $\\Gamma_\\ell$ is the image of a solution $V(\\bar{c}_\\ell)=1$ in $\\F_n$, under the canonical quotient map $\\pi_{\\ell}$, i.e. $\\pi_\\ell(\\bar{c}_\\ell)=\\bar{b}_\\ell$, with probability tending to $1$ as $\\ell$ goes to infinity. \n\\end{theorem}\n\nTo understand Theorem \\ref{univ} in the light of Definition \\ref{Density} one considers the following property $P$: for a fixed $V(\\bar x)=1$, every solution of $V(\\bar x)=1$ is the image of a solution of $V(\\bar x)=1$ in $\\F$, under the canonical quotient map. \n\nAlternatively, under the same interpretation, one can think that the above theorem says that either $\\bar b_\\ell$ is the image (under the canonical map) of a solution of $V(\\bar x)=1$ in $\\F_n$, or the probability that $V(\\bar{b}_\\ell)=1$ tends to $0$ as $\\ell$ goes to $\\infty$. \n\nWith a little care about defining constants (coefficients) in random groups one can make sense of a first-order sentence with constants.\n\n\\begin{definition}\nLet $\\Gamma_\\ell$ be a random group of density $d$ at length $\\ell$. Let $b_\\ell$ be an element in $\\Gamma_\\ell$. Then $b_\\ell$ is a constant if there exists an element $b\\in\\F_n$ such that $b_\\ell$ is the image of $b$, under the canonical quotient map, for all $\\ell\\in\\mathbb{N}$. \n\\end{definition}\n\n\nHence, under the above definition, a sentence with constants in $\\F_n$ makes sense in a random group and likewise a sentence with constants in a random group makes sense in $\\F_n$. \n\nFor the purposes of this paper we need a generalization of Theorem \\ref{univ1} to first-order sentences defined over constants. \n\n\\begin{theorem}\\label{UnivwithCon1}\nLet $d<1/16$. Let $\\sigma$ be a universal sentence in the language of groups with constants from $\\F_n$. 
Then, $\\sigma$ is true in $\\F_n$ if and only if it is true with overwhelming probability in a random group of density $d$.\n\\end{theorem}\n\n\nIt is essentially a corollary of a generalization of Theorem \\ref{univ} with constants. \n\n\\begin{theorem}\\label{UnivwithCon}\nLet $d<1/16$. Let $V(\\bar{x},\\bar a)=1$ be a system of equations over $\\F_n$. Suppose $\\Gamma_\\ell$ is a random group of density $d$ at length $\\ell$ and $\\pi_\\ell:\\F_n\\rightarrow\\Gamma_\\ell$ the canonical quotient map. \n\nThen, every solution $V(\\bar b_\\ell,\\bar a)=1$ in $\\Gamma_\\ell$ is the image of a solution $V(\\bar{c}_\\ell,\\bar a)=1$ in $\\F_n$, under the canonical quotient map $\\pi_{\\ell}$, i.e. $\\pi_\\ell(\\bar{c}_\\ell)=\\bar{b}_\\ell$, with probability tending to $1$ as $\\ell$ goes to infinity. \n\\end{theorem}\n\n\\subsection{Boolean combinations of Universal Existential Axioms}\n\nThe following lemma is due to Malcev. Its proof uses the fact that all solutions in a free group of the equation $x^2y^2z^2=1$ commute.\n\\begin{lemma}\\label{Conj}\nLet $\\F:=\\langle e_1, e_2, \\ldots\\rangle$ be a nonabelian free group. Then, a conjunction of equations is equivalent (over constants), in $\\F$, to one equation: \n$$\\F\\models \\forall x, y \\bigl((x=1\\land y=1)\\leftrightarrow (x^2e_1)^2e_1^{-2}=((ye_2)^2e_2^{-2})^2\\bigr)$$\n\\end{lemma}\nFor disjunctions we get the following:\n\\begin{lemma}\\label{Disj}\nLet $\\F:=\\langle e_1, e_2, \\ldots\\rangle$ be a nonabelian free group. Then, a disjunction of equations is equivalent (over constants), in $\\F$, to a conjunction of four equations:\n$$\\F\\models \\forall x, y \\bigl((x=1\\lor y=1)\\leftrightarrow \\bigwedge_{a\\in\\{e_1, e_1^{-1}\\}\\atop b\\in\\{e_2, e_2^{-1}\\}}[x^{a}, y^{b}]=1\\bigr)$$\n\\end{lemma}\n\nIn particular, one easily obtains the following corollary. \n\n\\begin{corollary} \\label{one_eq_free}\nA disjunction of conjunctions (or conjunction of disjunctions) of equations over $\\F$ is equivalent, in $\\mathbb F$, to one equation.\n\\end{corollary}\n\nSince the above corollary can be expressed by a universal formula, we also get the following. \n\n\\begin{corollary}\\label{one_eq} \nA disjunction of conjunctions (or conjunction of disjunctions) of equations over $\\Gamma$ is equivalent in $\\Gamma$ to the same one equation as in $\\mathbb F$.\n\\end{corollary}\n \n\\begin{lemma}(cf. \\cite[Lemma 6]{MR2154989}) \\label{reduction}\nLet $\\tau$ be a $\\forall\\exists$ first-order sentence in the language of groups. Then, $\\tau$ is equivalent in $\\mathbb F_n$ to a sentence $\\zeta$ of the form\n $$\\forall\\bar{x}\\exists\\bar{y} \\bigl(\\sigma(\\bar{x},\\bar{y}, \\bar{a})=1 \\land \\psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1\\bigr)$$ \nwhere $\\sigma(\\bar{x},\\bar{y}, \\bar{a})=1$ is an equation and $\\psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1$ is an inequation, both over constants from $\\F_n$.\n\nMoreover, if $d<1/16$, then $\\tau$ is almost surely true in a random group of density $d$ if and only if the sentence $\\zeta$ is. 
\n\n\\end{lemma}\n\\begin{proof} Every $\\forall\\exists$ sentence in the language of groups is (logically) equivalent to a formula in prenex (disjunctive) normal form \n$$\\forall\\bar x\\exists \\bar y\\Bigl( \\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})\\neq 1\\bigr)\\Bigr)$$\nIn any non-trivial group the quantifier-free part, $\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})\\neq 1\\bigr)$, of the above sentence is equivalent to \n$$\\exists \\bar z_1,\\ldots ,\\bar z_m \\Bigl((\\bigwedge_{i=1}^m \\bar z_i\\neq 1)\\bigwedge\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})= \\bar z_i\\bigr)\\Bigr)$$ \nBy Corollary \\ref{one_eq_free}, the disjunction of conjunctions of equations \n$$\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})=\\bar z_i\\bigr)$$\nis equivalent to one equation $\\sigma (\\bar{x},\\bar{y},\\bar z_1,\\ldots ,\\bar z_m,\\bar a)=1$ over constants in $\\mathbb F_n$. Similarly, the conjunction $\\bigwedge_{i=1}^m \\bar z_i\\neq 1$ is equivalent to a single inequation $\\psi(\\bar z_1,\\ldots, \\bar z_m,\\bar a)\\neq 1$ over constants in $\\F_n$. Hence, we can take for $\\zeta$ the following sentence\n\n$$\\forall\\bar x\\exists \\bar y \\exists \\bar z_1, \\ldots, \\bar z_m \\bigl(\\psi(\\bar z_1, \\ldots, \\bar z_m,\\bar a)\\neq 1\\bigwedge \\sigma (\\bar{x},\\bar{y},\\bar z_1,\\ldots, \\bar z_m,\\bar a)=1 \\bigr)$$\n\n\nFor a random group $\\Gamma$ of density $d<1/16$ we argue as follows. The sentence $\\forall\\bar x\\exists \\bar y\\Bigl( \\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})\\neq 1\\bigr)\\Bigr)$ is almost surely true in $\\Gamma$ if and only if $\\forall \\bar x \\exists \\bar y \\exists \\bar z_1,\\ldots,\\bar z_m \\Bigl((\\bigwedge_{i=1}^m \\bar z_i\\neq 1)\\bigwedge\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})= \\bar z_i\\bigr)\\Bigr)$ is almost surely true in $\\Gamma$. In addition, \n$$\\F_n\\models \\forall\\bar x\\forall\\bar y\\forall\\bar z\\Bigl( \\Big[(\\bigwedge_{i=1}^m \\bar z_i\\neq 1)\\bigwedge\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})= \\bar z_i\\bigr)\\Big]\\leftrightarrow $$ $$\\bigl(\\psi(\\bar z_1, \\ldots, \\bar z_m,\\bar a)\\neq 1\\bigwedge \\sigma (\\bar{x},\\bar{y},\\bar z_1,\\ldots,\\bar z_m,\\bar a)=1\\bigr)\\Bigr)$$\n\nThe above sentence, by Theorem \\ref{UnivwithCon1}, is almost surely true in $\\Gamma$. 
In particular, $\\forall \\bar x \\exists \\bar y \\exists \\bar z_1,\\ldots,\\bar z_m\\Bigl((\\bigwedge_{i=1}^m \\bar z_i\\neq 1)\\bigwedge\\bigvee_{i=1}^m\\bigl(\\Sigma_i(\\bar{x},\\bar{y})=1 \\land \\Psi_i(\\bar{x},\\bar{y})= \\bar z_i\\bigr)\\Bigr)$ is almost surely true in $\\Gamma$ if and only if $\\forall\\bar x\\exists \\bar y \\exists \\bar z_1, \\ldots, \\bar z_m \\bigl(\\psi(\\bar z_1, \\ldots, \\bar z_m,\\bar a)\\neq 1\\bigwedge \\sigma (\\bar{x},\\bar{y},\\bar z_1,\\ldots,\\bar z_m,\\bar a)=1 \\bigr)$ is almost surely true in $\\Gamma$.\n\\end{proof}\n\n\n\n\n\n\\subsection{Validation of a $\\forall\\exists$ sentence in nonabelian free groups.}\\label{Validation} \n\n\nLet $\\forall\\bar{x}\\exists\\bar{y} \\bigl(\\Sigma(\\bar{x},\\bar{y}, \\bar{a})=1 \\land \\Psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1\\bigr)$, \nwhere $\\Sigma(\\bar{x},\\bar{y}, \\bar{a})=1$ is a conjunction of equations and $\\Psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1$ a conjunction of inequations, be a true sentence in a nonabelian free group $\\F:=\\langle \\bar a \\rangle$. The idea, for validating the above sentence, is to find witnesses for the existentially quantified variables $\\bar{y}$ in terms of the universally quantified variables $\\bar{x}$ and the constants $\\bar{a}$ as words in $\\langle\\bar{x}, \\bar{a}\\rangle$. Indeed, the first step of the validating process is based on the following theorem \\cite[Theorem 1.2]{MR1972179}, \\cite{MR2154989}:\n\n\\begin{theorem}\\label{Merz}\nLet $\\mathbb{F}\\models\\forall\\bar{x}\\exists\\bar{y}(\\Sigma(\\bar{x},\\bar{y}, \\bar{a})=1 \\land \\Psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1)$. Then, there exists a tuple of words $\\bar{w}(\\bar{x}, \\bar{a})$ in the free group $\\langle\\bar{x}, \\bar{a}\\rangle$, such that $\\Sigma(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}), \\bar{a})$ is trivial in $\\langle\\bar{x}, \\bar{a}\\rangle$ and moreover $\\mathbb{F}\\models \\exists\\bar{x}\\Psi(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}), \\bar{a})\\neq 1$. \n\\end{theorem}\n\nIn the special case where no inequations exist, Theorem \\ref{Merz} is known as {\\em Merzlyakov's theorem} and leads to the equality of the positive theories of nonabelian free groups. We think of $\\bar{w}(\\bar{x},\\bar{a})$ as validating the sentence in a particular subset of $\\mathbb{F}^{\\abs{\\bar{x}}}$. What is left to do is find validating witnesses for the complement of this subset. The subset of $\\mathbb{F}^{\\abs{\\bar{x}}}$ for which the formal solution {\\bf does not} work is first-order definable by the union of the following ``varieties\" $\\psi_1(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}), \\bar{a})=1, \\ldots, \\psi_k(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}),\\bar{a})=1 $, where each $\\psi_i(\\bar{x},\\bar{y}, \\bar{a})$, for $i\\leq k$, is a word in $\\Psi(\\bar{x},\\bar{y},\\bar{a})$. One can further split each variety $\\psi_i(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}))$ into finitely many irreducible varieties, i.e. systems of equations $\\Sigma_{i1}(\\bar{x},\\bar{a})=1, \\ldots, \\Sigma_{im_i}(\\bar{x},\\bar{a})=1$ for $i\\leq k$, such that $L_{ij}:=\\langle \\bar{x},\\bar{a} \\ | \\ \\Sigma_{ij}(\\bar{x}, \\bar{a})\\rangle$, for $i\\leq k$ and $j\\leq m_i$, is a (restricted) limit group. \n\nThe iterative step of the process uses a further generalization of Merzlyakov's theorem that we record below (see \\cite[Theorem 1.18]{MR1972179}, \\cite{MR2154989}). 
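As a toy illustration of Theorem \\ref{Merz} (an example added here for exposition, not taken from \\cite{MR1972179} or \\cite{MR2154989}), consider the sentence $\\forall x\\exists y \\bigl([x,y]=1 \\land y\\neq 1\\bigr)$, which is true in $\\F$. The word $\\bar{w}(x,\\bar{a}):=x$ is a formal solution: $[x,x]$ is trivial in $\\langle x, \\bar{a}\\rangle$ and $\\F\\models\\exists x \\ x\\neq 1$. This formal solution fails to validate the sentence exactly on the variety $x=1$, where a fixed nontrivial constant from $\\bar{a}$ serves as a witness instead; it is this kind of ``bad\" set that the iterative step of the process takes care of.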
For convenience of notation we denote by $G(\\bar{x})$ a group $G$ with generating set $\\bar{x}$.\n\n\\begin{theorem}\\label{MerzTowers}\nLet $L(\\bar{x},\\bar{a}):=\\langle \\bar{x}, \\bar{a} \\ | \\ R(\\bar{x},\\bar{a})\\rangle$ be a restricted limit group, and $T(\\bar{x},\\bar{z}, \\bar{a})$ a tower constructed from a well-structured resolution of $L(\\bar x, \\bar a)$. \n\nSuppose \n$$\\mathbb{F}\\models \\forall\\bar{x} (R(\\bar{x},\\bar{a})=1 \\rightarrow \\exists \\bar{y}(\\Sigma(\\bar{x},\\bar{y},\\bar{a})=1 \\land \\Psi(\\bar{x},\\bar{y},\\bar{a})\\neq 1))$$\nThen there exist, $C_1(\\bar{x},\\bar{z},\\bar{s},\\bar{a})$, \\ldots, $C_q(\\bar{x},\\bar{z},\\bar{s},\\bar{a})$ a covering closure of $T(\\bar{x},\\bar{z},\\bar{a})$ and ``formal solutions\", $\\bar{w}_1(\\bar{x},\\bar{z},\\bar{s},\\bar{a}), \\ldots, \\bar{w}_q(\\bar{x},\\bar{z},\\bar{s},\\bar{a})$ with the following properties: \n\\begin{itemize}\n\\item for each $1\\leq i \\leq q$, the words $\\Sigma(\\bar{x}, \\bar{w}_i(\\bar{x},\\bar{z},\\bar{s},\\bar a),\\bar a)$ are trivial in $C_i(\\bar{x},\\bar{z},\\bar{s},\\bar a)$. \n\\item for each $1\\leq i\\leq q$, there exists a morphism $h_i:C_i\\rightarrow \\mathbb{F}$, which is the identity on $\\F$ and such that $\\Psi(h_i(\\bar{x}), h_i(\\bar{w}_i),\\bar{a})\\neq 1$. \n\\end{itemize}\n\\end{theorem}\n\nIn principle formal solutions do not exist in arbitrary limit groups, but only in limit groups that admit a special structure - the structure of a tower. A tower is constructed as a nested graph of groups based on a free group and ``gluing\" at each step either a surface with boundaries (along the boundaries) or a free abelian group along a direct factor. The covering closure of a tower is a finite set of towers where the free abelian ``floors\" are augmented to finite index supergroups (see \\cite[Definition 1.15]{MR1972179}). It covers the original tower in the sense that every morphism from the original tower to a free group, extends (as a closure contains the tower as a subgroup) to a morphism from some closure. The precise construction is of no practical importance for this paper, but we note that as a consequence the (new) subset of $\\mathbb{F}^{\\abs{x}}$ for which the (new) formal solution {\\bf does not work} is not a union of varieties anymore, but is {\\bf contained} in a union of Diophantine sets. This subtlety makes the proof that the process terminates quite involved. In any case, the important fact for us is that after repeatedly using the above theorem we will eventually cover all of $\\mathbb{F}^{\\abs{x}}$ with finitely many subsets for which some formal solution works.\n\nWe will give some more details on the procedure avoiding the technical results that imply its termination after finitely many steps. It will be important to carefully collect the ``bad\" tuples in a set, in such a way that the procedure terminates. This set will occasionally be larger than needed, i.e. it will also contain tuples for which a formal solution already works, but this is unavoidable, under the existing methods, if one wants to guarantee the termination. We next explain how this works. 
For presentational purposes we will first record a special case, called {\\em the minimal rank case}.\n\\ \\\\ \\\\\n{\\bf The minimal rank case.} The assumption in this case is that all (restricted) limit groups $L_{ij}$, for $i\\leq k$ and $j\\leq m_i$, that collect the ``bad\" tuples in the first step of the procedure {\\bf do not admit} a surjection (that is the identity on $\\F$) to a free product $\\F*\\F_n$, for some nontrivial free group $\\F_n$. This simplified version of the iterative procedure is presented, for example, in \\cite[Section 1]{Sela4}. This assumption considerably simplifies the technicalities in the construction of the towers on each consecutive step and guarantees that their complexity decreases. \n\n\\begin{enumerate}\n\\item In the first step of the procedure we apply Theorem \\ref{Merz} to the sentence $\\forall\\bar{x}\\exists\\bar{y}(\\Sigma(\\bar{x},\\bar{y}, \\bar{a})=1 \\land \\Psi(\\bar{x},\\bar{y}, \\bar{a})\\neq 1)$ and obtain a formal solution $\\bar{w}(\\bar{x},\\bar{a})$ that {\\bf does not work} in the union of varieties $\\psi_1(\\bar{x},\\bar{w}(\\bar{x},\\bar{a}),\\bar{a})=1, \\ldots, \\psi_k(\\bar{x},\\bar{w}(\\bar{x}, \\bar{a}),\\bar{a})=1$.\n\\item The latter union of varieties can be further decomposed as $Hom_{\\F}(L_{ij},\\F)$, for $i\\leq k$ and $j\\leq m_i$, where $L_{ij}$ is a restricted limit group, and $Hom_{\\F}(L_{ij},\\F)$ is the set of restricted homomorphisms from $L_{ij}$ to $\\F$. Equivalently, this can be seen as a decomposition of each variety to its irreducible components. \n\\item In the iterative step of the procedure we work with each $L_{ij}$ in parallel. Thus, we can fix $L:=L_{ij}$ for some $i\\leq k$ and $j\\leq m_i$. To each (restricted) limit group, $L$, we can assign finitely may towers (based on well-structured resolutions of $L$), $T_1, \\ldots, T_{\\ell}$, such that any (restricted) morphism $h:L\\rightarrow\\F$ factors through $T_i$, for some $i\\leq\\ell$. We will, again, work in parallel with each $T_i$. Thus, we can fix $T:=T_i$, for some $i\\leq \\ell$.\n\\item We apply Theorem \\ref{MerzTowers} for the couple $L$ and $T$ and obtain finitely many closures $C_1(\\bar x, \\bar z, \\bar s, \\bar a), \\ldots C_q(\\bar x, \\bar z, \\bar s, \\bar a)$ and formal solutions $\\bar{w}_1(\\bar x, \\bar z, \\bar s, \\bar a), \\ldots, \\bar{w}_q(\\bar x, \\bar z, \\bar s, \\bar a)$ for each closure, that each {\\bf does not work} in the definable set \n$$\\exists \\bar z,\\bar s\\bigl(\\Sigma_{C_i}(\\bar{x}, \\bar{z}, \\bar{s}, \\bar{a})=1\\bigr)\\land \\forall \\bar z, \\bar s \\bigl(\\psi_1(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1\\lor\\ldots\\lor\\psi_k(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1)\\bigr)$$\n\\item We do not continue with the previous definable set, but rather with the larger set defined by \n$$\\exists \\bar z, \\bar s \\biggl(\\Sigma_{C_i}(\\bar{x}, \\bar{z}, \\bar{s}, \\bar{a})=1\\bigwedge\\bigl(\\psi_1(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1\\lor\\ldots\\lor\\psi_k(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1\\bigr)\\biggr)$$\n\\item We work with each closure in parallel. Thus we fix $C(\\bar x, \\bar z, \\bar s, \\bar a):=C_i(\\bar x, \\bar z, \\bar s, \\bar a)$ for some $i\\leq q$. 
We consider the set of (restricted) morphisms, $Hom_{\\F}(C,\\F)$, whose images satisfy $\\psi_1(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1\\lor\\ldots\\lor\\psi_k(\\bar x, \\bar w_i(\\bar x, \\bar z, \\bar s, \\bar a) , \\bar a)=1$ in $\\F$. There exist finitely many (restricted) limit groups, $QL_1, \\ldots, QL_m$, which are quotients of $C$ and such that a (restricted) morphism $h:C\\rightarrow\\F$ satisfies the previous condition if and only if it factors through one of this quotients. \n\\item We repeat the procedure for each $QL_i$ and towers based on resolutions constructed as in \\cite[Proposition 1.13]{Sela4}, by Theorem 1.18 in \\cite{Sela4} the procedure terminates after finitely many steps.\n\\end{enumerate}\n \n\\ \\\\ \\\\\nWe next record the general case. \n\\ \\\\ \\\\\n{\\bf The general case.} In the general case not only the resolutions (used to construct towers) are modified but also the family of morphisms that descend to the next step of the procedure. Instead of well-structured resolutions we restrict to a special subclass called {\\em well-separated resolutions} (see \\cite[Definition 2.2]{Sela4}). One of the properties of well-separated resolutions is that they can be used to endow each surface that corresponds to a $QH$ subgroup of every $JSJ$ decomposition along the resolution with a family of (two-sided, disjoint, non null-homotopic, non parallel) simple closed curves. This family of curves induces a splitting, as a graph of groups, of the fundamental group of the surface by cutting it along them. \n\n\\begin{definition} \nLet $\\Sigma$ be a compact surface. Given a homomorphism $h:\\pi _1(\\Sigma)\\rightarrow H$\na family of pinched curves is a collection $\\mathcal C$ of disjoint, non-parallel, two-sided\nsimple closed curves, none of which is null-homotopic, such that the fundamental\ngroup of each curve is contained in $ker h$ (the curves may be parallel to a boundary component).\n\n\nThe map $\\phi$ is {\\em non-pinching} \\index{Non-pinching map} if there is no pinched curve: $\\phi$ is injective in restriction to\nthe fundamental group of any simple closed curve which is not null-homotopic.\n\\end{definition}\n\nIf $\\eta_{i+1}:L_i\\rightarrow L_{i+1}$ is part of a well-separated resolution and $\\pi_1(\\Sigma):=Q$ is a $QH$ subgroup in a $JSJ$ decomposition of a freely indecomposable free factor of $L_i$, then to $\\eta_{i+1}$ and the free decomposition of $L_{i+1}$ corresponds a family of pinched curves on $\\Sigma$ that we will use in order to define a special class of morphisms called {\\em taut with respect to the given resolution} (see \\cite[Definition 2.4]{Sela4}). \n\n\\begin{remark}\nWhen a morphism $h:L\\rightarrow\\F$ factors through a resolution, $L:=L_0\\rightarrow L_1\\rightarrow L_2\\rightarrow \\ldots \\rightarrow L_p$ then for each $i\\leq p$, there exists a section with respect to the quotient map $\\eta_i:L_{i-1}\\rightarrow L_i$, for $00$ is possible despite drawing data from the KT solution, since $N$ is finite. This gives us precisely the handle we need to determine the value of $N$ to observe crossed-channel rescattering effects.\n\nWe can now perform $B\\in\\mathbb{N}$ runs, which generate $B$ datasets $\\mathbb{D}_b$, $b=1,\\ldots,B$, of size $N$. 
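As a rough illustration of this toy-experiment strategy, the pseudo-dataset generation and the resulting significance estimate can be sketched in a few lines of code. The snippet below is a schematic sketch only: it assumes that the normalized Dalitz-plot densities of the KT and Omn{\`e}s descriptions are available as callables (the names \texttt{f\_kt}, \texttt{f\_omnes}, and \texttt{sample\_dalitz} are placeholders, not part of any published code), and that $\Delta\L$ denotes the event-wise log-likelihood ratio of the two hypotheses summed over one dataset, consistent with the Gaussian limit quantified in the expressions below.
\begin{verbatim}
# Schematic sketch only (placeholder names, not the analysis code).
import numpy as np
from scipy.special import erf, erfinv

def delta_L(events, f_kt, f_omnes):
    """Summed log-likelihood ratio (Omnes vs. KT) for one pseudo-dataset."""
    s, t = events
    return np.sum(np.log(f_omnes(s, t) / f_kt(s, t)))

def toy_experiments(B, N, f_kt, f_omnes, sample_dalitz, rng):
    """B pseudo-datasets of size N drawn from the KT density f_kt."""
    return np.array([delta_L(sample_dalitz(N, rng), f_kt, f_omnes)
                     for _ in range(B)])

# Gaussian approximation: with d_kl and nu_kl obtained by integrating over
# the Dalitz region (see the expressions below), the probability of *not*
# rejecting the Omnes hypothesis and its inversion read
def q_of_N(N, d_kl, nu_kl):
    mu, sigma = -N * d_kl, np.sqrt(N * nu_kl)
    return 1.0 - 0.5 * (1.0 + erf(-mu / (np.sqrt(2.0) * sigma)))

def N_of_q(q, d_kl, nu_kl):
    return 2.0 * nu_kl * (erfinv(1.0 - 2.0 * q) / d_kl) ** 2
\end{verbatim}
The histogram of such toy experiments can then be compared directly to the Gaussian expectation, as done in Fig.~\ref{fig:histo}.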
On each of these datasets, one can compute $\\Delta\\L$ and access its probabilistic distribution.\nFor large values of $B$, this distribution is Gaussian, with the mean and variance given by\n\\begin{align}\n\\text{E}\\left[\\Delta\\L(\\mathbb{D})\\right]&=-N d_\\text{KL} \\,,\\notag\\\\\n\\text{Var}\\left[\\Delta\\L(\\mathbb{D})\\right]&=N\\nu_\\text{KL}\\,,\n\\end{align}\nwhere\n\\begin{align}\n\\tilde{d}_\\text{KL}(s,t)&=f^\\text{KT}(s,t) \\ln\\left(\\frac{f^\\text{KT}(s,t)}{f^\\text{Omn{\\`e}s}(s,t)}\\right)\\,,\\notag\\\\\nd_\\text{KL}&=\\int_D \\tilde{d}_\\text{KL}(s,t)\\dd s\\dd t \\,,\\notag\\\\\n\\tilde{\\nu}_\\text{KL}(s,t)&=f^\\text{KT}(s,t) \\ln\\left(\\frac{f^\\text{KT}(s,t)}{f^\\text{Omn{\\`e}s}(s,t)}\\right)^2\\,,\\notag\\\\\n\\nu_\\text{KL}&= \\int_D \\tilde{\\nu}_\\text{KL}(s,t) \\dd s\\dd t - d^2_\\text{KL}\\,.\n\\label{eq:entropy_variance}\n\\end{align}\nThe expressions are known as the Kullback--Leibler divergence~\\cite{Kullback:1951zyt} and variance.\nThe cumulative distribution function reads\n\\begin{align}\n\\mathcal{N}(x,\\mu(N),\\sigma(N))&=\\frac{1}{2}\\left(1+\\text{erf}\\left(\\frac{x-\\mu(N)}{\\sqrt{2}\\sigma(N)}\\right)\\right) \\notag\\\\\n\\text{with} \\quad \\mu(N)&=-Nd_\\text{KL}\\,, \\quad \\sigma(N)=\\sqrt{N\\nu_\\text{KL}}\\,,\n\\label{eq:mean_std}\n\\end{align}\nwhere erf is the error function. To validate the assumption of a normal distribution, we use the comparison in Fig.~\\ref{fig:histo}, which indicates a very good description.\\footnote{Note that this is not an assumption for large $N$ due to the central-limit theorem.}\nFrom here on we can calculate our results using the pdfs as defined in Eq.~\\eqref{eq:pdf}.\n\n\\begin{figure}\n \\fontsize{12pt}{14pt} \\selectfont\n \\scalebox{0.66}{\\input{plots/histo.tex}}\n \\caption{Histogram for $B=10^6$ datasets of different sample size $N$. For this plot, the amplitudes are computed at $M=M_\\phi$ and for $J^{PC}=1^{--}$. Additionally, we plot Gaussians with the mean and standard deviation from Eq.~\\eqref{eq:mean_std}.}\n \\label{fig:histo}\n\\end{figure}\n\nIn the region $\\Delta\\L<0$ we reject the hypothesis that the Omn{\\`e}s solutions are sufficient to describe the data. \nThe probability that the hypothesis is not rejected then reads\n\\begin{equation}\nq(N)=1-\\mathcal{N}(0,\\mu(N),\\sigma(N))\\,.\n\\end{equation}\nThe inversion of the equation gives the number of events with the confidence determined by $q$ via\n\\begin{equation}\nN(q)=2\\nu_\\text{KL}\\left(\\frac{\\text{erf}^{-1}(1-2q)}{d_\\text{KL}}\\right)^2\\,.\n\\end{equation}\nFor a $5\\sigma$ confidence level we need to have $q=2.87\\cdot10^{-7}$~\\cite{ParticleDataGroup:2022pth} and can now compute the resulting $N$.\n\n\\begin{figure*}[t!]\n \\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.65}{\\input{plots/q_mass_dependence.tex}}\n\t\\end{subfigure}\n \\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.65}{\\input{plots/N_mass_dependence.tex}}\n\t\\end{subfigure}\n \\caption{\\textit{Left panel:} Probability of the Omn{\\`e}s model not being rejected as a function of the mass of the decaying particle, for all four reconstruction theorems and $N=1000$. \\textit{Right panel:} Statistics needed to set the probability that the Omn{\\`e}s model is not rejected to $5\\sigma$ (right). 
The gray vertical lines, from left to right correspond to the masses of the $\\omega$, $\\phi$, $\\omega(1420)$, and $\\omega(1650)$ resonances.}\n \\label{fig:mass_dep}\n\\end{figure*}\n\n\\begin{figure*}[t!]\n \\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.65}{\\input{plots/q_mass_dependence_diff_1mm.tex}}\n\t\\end{subfigure}\n \\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.65}{\\input{plots/q_mass_dependence_diff_1mp.tex}}\n\t\\end{subfigure}\n \\caption{\\textit{Left panel:} Comparison for the $1^{--}$ reconstruction theorem with normal SVAs and $1^{-+}$ SVAs (``mixed''). \\textit{Right panel:} Comparison for $1^{-+}$ reconstruction theorem with normal SVAs and $1^{--}$ SVAs (``mixed''). We show the probability of the Omn{\\`e}s model not being rejected as a function of the mass of the decaying particle. Vertical gray lines as in Fig.~\\ref{fig:mass_dep}.}\n \\label{fig:q_mass_diff}\n\\end{figure*}\n\n\\section{Results}\\label{sec:results}\n\nAmong the three-pion decays studied in this paper, only two Dalitz plots have been studied experimentally with sufficiently high statistics: $\\omega\\to3\\pi$~\\cite{WASA-at-COSY:2016hfo,BESIII:2018yvu}, and $\\phi\\to 3\\pi$~\\cite{KLOE:2003kas,Akhmetshin:2006sc}.\n\nFor $\\omega\\to 3\\pi$, WASA-at-COSY~\\cite{WASA-at-COSY:2016hfo} has performed a Dalitz plot study with $44\\,080$ events, \nwhile the analysis by BESIII~\\cite{BESIII:2018yvu} is based on $260\\,520$ events. \nBoth experiments parameterize the distribution by a polynomial expansion and present results testing one- and two-parameter models.\nApplying the formalism of the preceding section, we find that the statistics of WASA-at-COSY is sensitive to rescattering effects only at $2.1\\sigma$. On the other hand, in agreement with Ref.~\\cite{JPAC:2020umo}, BESIII reaches a $5\\sigma$ level for the solution containing one subtraction.\nHowever, as pointed out by Ref.~\\cite{JPAC:2020umo}, an additional subtraction leads to a better agreement for the Dalitz plot parameters. \n\nFor $\\phi\\to 3\\pi$, KLOE~\\cite{KLOE:2003kas} provides a Dalitz plot analysis using $2\\cdot 10^6$ events, while CMD-2~\\cite{Akhmetshin:2006sc} has studied almost $8\\cdot 10^4$ decays. For both, rescattering effects are clearly observable, as concluded by Ref.~\\cite{Niecknig:2012sj}. \n\nUsing the statistical method explained above and the derived reconstruction theorems, we compute $q(N)$ and $N(q)$ for a large mass range as shown in Fig.~\\ref{fig:mass_dep}. The first observation is that the results for $1^{-+}$ and $2^{++}$ are extremely similar: the different kinematic factors seem to play a minor role, and the overall behavior is dominated by the form of the reconstruction theorem, which is the same in these two cases under the approximations we made. The mass dependence of $N$ and $q$ for the $1^{--}$ decay, however, looks strikingly different.\nTo investigate the source of this difference in sensitivity between the two reconstruction theorems, we perform the following, unphysical, test. We plug the SVAs, calculated as KT solutions for the $1^{-+}$ decay, into the linear combination given by the reconstruction theorem for $1^{--}$, see Eq.~\\eqref{eq:RT_1--}, and vice versa.\nThese unphysical amplitudes are denoted by ``mixed'' in Fig.~\\ref{fig:q_mass_diff}. We observe that this changes the absolute values of $q(N)$, while the qualitative behavior is the same. 
We therefore conclude that much of the sensitivity to rescattering effects is not actually due to the size of the lineshape modification of the SVAs, as shown in Fig.~\\ref{fig:basis}, but rather due to the specific linear combination in which they form the full decay amplitude.\n\nIn a log-plot for $N$ (for fixed $q$) as a function of the decay mass, we find a similar form as for $q$ (with fixed $N$); cf.\\ left and right panels of Fig.~\\ref{fig:mass_dep}. For large decay masses, the necessary number of events rises for all processes.\\footnote{Note that this effect is not fully visualized by the mass range displayed in Fig.~\\ref{fig:mass_dep}.} This is due to the fact that the KT solutions converge to the Omn{\\`e}s function in the infinite-mass limit.\nHowever, in the high-mass region, $M\\gtrsim 15M_\\pi$, our approximations are no longer valid: inelastic effects and higher partial waves play a non-negligible role. For low decay masses, approaching the three-pion threshold, the necessary event numbers for the $1^{--}$, $1^{-+}$, and $2^{++}$ decays rise due to limited phase space and kinematic suppression of the Dalitz-plot borders, far away from the $\\rho$ resonance. For $0^{--}$ this is different, since here the amplitude does not vanish at the edge of the Dalitz plot.\n\n\\begin{figure*}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_5.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_comp_5.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_7.25.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_comp_7.25.tex}}\n\t\\end{subfigure}\n\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_8.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_comp_8.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_14.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1-+/dal_comp_14.0.tex}}\n\t\\end{subfigure}\n\t\\caption{Dalitz plots for the $J^{PC}=1^{-+}$ reconstruction theorem. From top to bottom the decay mass grows according to $M=5/7.25/8/14\\,M_\\pi$, while the Dalitz plot without phase space $\\tilde{f}(s,t)$ defined via Eq.~\\eqref{eq:pdf_nophase} is shown in the left and $\\tilde{d}_\\text{KL}(s,t)$ (Eq.~\\eqref{eq:entropy_variance}) in the right column.}\n\t\\label{fig:dalitz_1mp}\n\\end{figure*}\n\nThe mass scan manifests several prominent features in the significance plot. \nFor $1^{-+}$ and $2^{++}$ decays, $N$ starts at $10^5$ events around the $\\omega$ mass and then rises steeply to approximately $2\\cdot10^6$ events at the $\\phi$ mass. At higher masses, it falls off up to about $12M_\\pi$. 
The very high number of necessary events is mainly due to the fact that crossing symmetry requires a zero in the Dalitz plot along the line $t=u$, and hence any differences due to rescattering have to appear at the edge of the Dalitz plot, where the phase space is suppressed by the Kibble function. The Dalitz plots for decays of a particle with $1^{-+}$ quantum numbers are shown in Fig.~\\ref{fig:dalitz_1mp} for different masses. Here the difference decreases \nuntil the $\\rho$ bands are inside the Dalitz plot, and then falls off again when the size increases further.\n\n\\begin{figure*}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_5.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_comp_5.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_7.25.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_comp_7.25.tex}}\n\t\\end{subfigure}\n\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_8.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_comp_8.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_14.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/1--/dal_comp_14.0.tex}}\n\t\\end{subfigure}\n\t\\caption{Dalitz plots for the $J^{PC}=1^{--}$ reconstruction theorem. From top to bottom the decay mass grows according to $M=5/7.25/8/14\\,M_\\pi$, while the Dalitz plot without phase space $\\tilde{f}(s,t)$ defined via Eq.~\\eqref{eq:pdf_nophase} is shown in the left and $\\tilde{d}_\\text{KL}(s,t)$ (Eq.~\\eqref{eq:entropy_variance}) in the right column.} \n\t\\label{fig:dalitz_1mm}\n\\end{figure*}\n\n\\begin{figure}\n \\centering\n \\includegraphics[width=0.48\\textwidth]{figures/configurations.pdf}\n \\caption{Sketches of different kinematic configurations for the $1^{--}$ decay with increasing decay mass. The straight lines denote the (qualitative) position of the three $\\rho$ bands in the Dalitz plot. The masses below the diagrams denote the decay masses for which the specific kinematic configurations are reached.}\n \\label{fig:configurations}\n\\end{figure}\n\nFor the $1^{--}$ decays, we find a different behavior. The event number $N$ starts at high values for the $\\omega$ resonance and then shows an overall decline with rising decay mass, with two small peaks between the $\\phi$ and $\\omega(1650)$. At the $\\phi$ resonance mass, the $\\rho$ bands are completely inside the Dalitz plot. The Dalitz plots for different decay masses are shown in Fig.~\\ref{fig:dalitz_1mm}. The first peak occurs due to the third kinematic configuration of the Dalitz plot as shown in Fig.~\\ref{fig:configurations}. The difference increases again when the three $\\rho$ bands cross in the middle of the Dalitz plot. 
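The kinematic statements above can be made concrete with a short numerical sketch of the Dalitz-plot geometry. The snippet below is an illustration only (not part of the analysis code); it uses the standard two-body sub-energy boundary for a decay into three equal-mass pions together with approximate numerical masses, and checks for which decay masses the band $s=M_\rho^2$ lies inside the physical region. The three bands $s=t=u=M_\rho^2$ meet in the center of the plot exactly at $M^2=3(M_\rho^2-M_\pi^2)$, i.e., $M\approx 1.32\,\text{GeV}$.
\begin{verbatim}
# Illustration only: Dalitz-plot boundary for X -> 3 pi (equal pion masses)
# and the location of the rho bands; masses in GeV are approximate.
import numpy as np

M_PI, M_RHO = 0.13957, 0.77526

def t_range(s, M, m=M_PI):
    """Kinematic limits of the crossed pair invariant for given s,
    from the standard two-body sub-energy boundary."""
    E2 = np.sqrt(s) / 2.0                        # pair-pion energy, (23) rest frame
    E3 = (M**2 - s - m**2) / (2.0 * np.sqrt(s))  # bachelor-pion energy
    p2 = np.sqrt(max(E2**2 - m**2, 0.0))
    p3 = np.sqrt(max(E3**2 - m**2, 0.0))
    return (E2 + E3)**2 - (p2 + p3)**2, (E2 + E3)**2 - (p2 - p3)**2

def rho_band_inside(M):
    """True once s = M_rho^2 lies within [4 M_pi^2, (M - M_pi)^2]."""
    return 4.0 * M_PI**2 <= M_RHO**2 <= (M - M_PI)**2

for M in (0.783, 1.019, 1.425, 1.670):   # omega, phi, omega(1420), omega(1650)
    inside = rho_band_inside(M)
    print(M, inside, t_range(M_RHO**2, M) if inside else None)
\end{verbatim}
For the $\omega$ the band still lies outside the physical region, while from the $\phi$ upwards it moves into the Dalitz plot, in line with the configurations sketched in Fig.~\ref{fig:configurations}.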
The second peak is also due to a peculiar structure in the Dalitz plot: in this decay mass region we find a ring-shaped local minimum, clearly visible for an unphysically narrow $\\rho$ width; see \\ref{app:interferencering}. \nThe ring affects the sensitivity even for the physical $\\rho$ width, and is responsible for the second peak.\n\n\\begin{figure}\n \\centering\n \\fontsize{12pt}{14pt} \\selectfont\n \\scalebox{0.65}{\\input{plots/N_mass_dependence_fixed_1mm.tex}}\n \\caption{Minimal number of events $N$ to exclude the Omn{\\`e}s model at $5\\sigma$ significance for $J^{PC}=1^{--}$, as a function of the decay mass, decomposed into two different effects.\n The red line corresponds to the full solution from the right panel in Fig.~\\ref{fig:mass_dep}. For the green line (``mass fixed''), we fix the size of the Dalitz plot to $M=7.5M_\\pi$ and let only the SVAs vary with the running mass. For the blue line (``basis fixed''), we use the same SVA solution for $M=7.5M_\\pi$ for all masses and vary the size of the Dalitz plot.\n Vertical gray lines as in Fig.~\\ref{fig:mass_dep}.}\n \\label{fig:N_mass_fixed_1mm}\n\\end{figure}\n\nTo better disentangle the origin of the various maxima and minima in the decay-mass dependence of the $1^{--}$ sensitivity, we separate, somewhat unphysically, two different effects in Fig.~\\ref{fig:N_mass_fixed_1mm}: the size of the Dalitz plot, and modifications of the SVAs. First, we keep the SVA basis functions fixed as calculated for the decay mass $M=7.5M_\\pi$ and only vary the size of the Dalitz plot; second, we conversely keep the Dalitz plot fixed at $M=7.5M_\\pi$ and only vary the SVAs with their implicit decay-mass dependence. \nThe precise curves depend heavily on the choice of the fixed mass, but we observe both peaks when using an SVA for a fixed mass and only varying the size of the Dalitz plot. \nFixing the size of the Dalitz plot and only varying the SVAs, on the other hand, results in a rather smooth decrease of the difference towards higher masses. We therefore conclude that the peaks in the $1^{--}$ mass dependence are dominated by the structure of the Dalitz plot and not by the difference in the SVAs.\n\nThe main takeaway, however, is that fewer than $10^5$ events are sufficient above the $\\phi$ mass, so that rescattering effects will play an important role in analyses of Dalitz plot data. 
\nFor the $\\omega$ resonance and lower masses of the decaying particle, one requires more than $10^6$ events to observe them.\n\n\\begin{figure*}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_5.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_comp_5.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n \t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_7.25.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n \\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_comp_7.25.tex}}\n\t\\end{subfigure}\n\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_8.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_comp_8.0.tex}}\n\t\\end{subfigure}\n\t\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{12.5pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_14.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.49\\textwidth}\n\t\t\\fontsize{14pt}{14pt} \\selectfont\n \\scalebox{0.67}{\\input{plots/0--/dal_comp_14.0.tex}}\n\t\\end{subfigure}\n\t\\caption{Dalitz plots for the $J^{PC}=0^{--}$ reconstruction theorem. From top to bottom the decay mass grows according to $M=5/7.25/8/14\\,M_\\pi$, while the Dalitz plot $f(s,t)$ defined via Eq.~\\eqref{eq:pdf} is shown in the left and $\\tilde{d}_\\text{KL}(s,t)$ (Eq.~\\eqref{eq:entropy_variance}) in the right column.} \n\t\\label{fig:dalitz_0mm}\n\\end{figure*}\n\nFor $0^{--}$ decays, we find some small numerical fluctuations in the low-mass region, which are due to the multiple regions where the total decay amplitude is kinematically suppressed in the Dalitz plots, shown in Fig.~\\ref{fig:dalitz_0mm}: it vanishes along the three lines of $s=t$, $s=u$, and $t=u$. The number of events slightly rises up to approximately $N=10^6$ at the mass of the $\\omega$ and stays constant up to $9M_\\pi$. At higher masses, a slow decline sets in, including a small peak around $14M_\\pi$. Due to the structure of the reconstruction theorem, the differences are located near the Dalitz plot boundaries. The six regions of intensity are then split into regions with larger pdfs for the KT equations and the Omn{\\`e}s solutions, respectively. \n\nFinally, to test the dependence of our findings on the $\\rho$ resonance width, we repeat the above exercises using a phase shift extracted from a simple Breit--Wigner model with an energy-dependent width~\\cite{Ropertz:2018stk}, whose nominal width we fix to the smaller value $\\Gamma_\\rho=30\\,\\text{MeV}$. We find that the number of events required to distinguish rescattering effects is significantly increased by about two orders of magnitude. This confirms the expected trend that rescattering effects vanish in the limit of small widths. A short analytic derivation of this limit is presented in \\ref{app:narrow_res}.\n\n\\section{Conclusion} \\label{sec:conclusions}\n\nIn this article, we have investigated the feasibility of unambiguously identifying the crossed-channel rescattering effects beyond the simplest isobar model in three-pion decays dominated by $\\rho\\pi$ intermediate states. 
For four different quantum numbers of the decaying particles we have solved the Khuri--Treiman equations, integral equations that sum iterated two-pion rescattering in each pion pair to all orders. We have determined the minimal sample sizes for which Dalitz-plot distributions allow us to distinguish the KT solutions from the naive picture that ignores all effects beyond two-body resonances. The dependence on the mass of the decaying resonance has been studied in detail throughout.\n\nThe significance of the rescattering effects in $3\\pi$ final states is heavily dependent on the decay kinematics. In particular, the appearance and position of the $\\rho$ bands in the Dalitz plots play a major role. For $J^{PC}=1^{--}$ we found a strong dependence on the mass of the decaying particle: rescattering effects are small for the $\\omega$ resonance, where at least a few times $10^5$ events in a Dalitz plot are necessary to identify them at $5\\sigma$ significance, while they are easily observable for the $\\phi$ resonance, with of the order of $10^4$ Dalitz plot events being sufficient; a similar sensitivity is expected for decaying isoscalar vector resonances up to almost $2\\,\\text{GeV}$. However, the predictive power for large masses, e.g., the three-pion decays of the vector charmonia $J/\\psi$ or $\\psi'$~\\cite{BESIII:2012vmy}, is clearly limited due to inelastic effects and higher partial waves.\n\nIn general, processes with zeros in the amplitude due to crossing symmetry need more statistics to resolve rescattering effects. This is the case for the isovector $3\\pi$ system with quantum numbers $1^{-+}$ and $2^{++}$, as well as for isoscalar $0^{--}$ states, for which, over a wide mass range up to $2\\,\\text{GeV}$, we predict necessary statistics between $10^5$ and $10^6$ events to identify non-trivial rescattering effects at $5\\sigma$ significance. \n\nThroughout, our investigation should be understood as a pilot study towards the thorough implementation of Dalitz plot fits beyond the simplest isobar models. Theoretical limitations at this point clearly concern the restriction to Khuri--Treiman systems with a single free parameter: it is by no means guaranteed that the neglect of additional subtraction constants, which inter alia allow us to absorb effects of inelastic intermediate states, is justified in all circumstances. Furthermore, decays with several relevant partial waves, in particular isoscalar $\\pi\\pi$ $S$-waves with their strong coupling to $K\\bar K$ above $1\\,\\text{GeV}$, make it additionally difficult to pin down corrections to two-pion lineshapes unambiguously; these are known to play an important role in the interpretation of resonance signals in the $a_1$ spectrum~\\cite{COMPASS:2015kdx,Mikhasenko:2015oxp,COMPASS:2020yhb}. 
The systematic study of such more complicated KT systems remains a both formidable and highly rewarding challenge for future research.\n\n\\begin{acknowledgements}\n\\begin{sloppypar}\nWe thank Fabian Krinner for collaboration in an early stage of this project.\nFinancial support by the DFG through the funds provided to the Sino--German Collaborative Research Center TRR110 ``Symmetries and the Emergence of Structure in QCD'' (DFG Project-ID 196253076 -- TRR 110) is gratefully acknowledged.\nMM is funded by the Deutsche Forschungsgemeinschaft under Germany's Excellence Strategy -- EXC-2094 -- 390783311.\n\\end{sloppypar}\n\\end{acknowledgements}\n\n\\begin{appendix}\n\\section[1-+ reconstruction theorem]{$1^{-+}$ reconstruction theorem}\\label{app:1mp-RT}\n\nThis appendix is dedicated to the derivation of the $1^{-+}$ reconstruction theorem using fixed-$t$ dispersion relations.\nWe start by considering the scattering process\n\\begin{equation}\nX^i(p)\\pi^j(p_1)\\to\\pi^k(p_2)\\pi^l(p_3)\\,,\n\\end{equation}\nwhere the $T$-matrix element is given by\n\\begin{align}\n&\\bra{\\pi^k(p_2)\\pi^l(p_3)}T\\ket{X^i(p)\\pi^j(p_1)}\\notag\\\\\n&\\quad=(2\\pi)^4\\delta^{(4)}(p+p_1-p_2-p_3)\\mathcal{M}^{ijkl}(s,t,u)\\,,\n\\end{align}\nand the Mandelstam variables are defined as\n\\begin{align}\ns&=(p+p_1)^2=(p_2+p_3)^2\\,,\\notag\\\\\nt&=(p-p_2)^2=(p_1-p_3)^2\\,,\\notag\\\\\nu&=(p-p_3)^2=(p_1-p_2)^2\\,.\n\\label{eq:mandelstam}\n\\end{align}\nThe decomposition into the scalar amplitude and the kinematic prefactor due to the odd intrinsic parity can be found in Eq.~\\eqref{eq:amp_decomp}.\nThe isospin structure is identical to $\\pi\\pi$ scattering and the scalar amplitude therefore obeys the same decomposition\n\\begin{align}\n\\mathcal{H}^{ijkl}(s,t,u)&=\\delta^{ij}\\delta^{kl}\\mathcal{H}_s(s,t,u)+\\delta^{ik}\\delta^{jl}\\mathcal{H}_t(s,t,u)\\notag\\\\\n&+\\delta^{il}\\delta^{jk}\\mathcal{H}_u(s,t,u)\\,.\n\\end{align}\nDue to the symmetry of the process the amplitude needs to stay invariant under simultaneous exchanges of isospin indices and momenta, which relates $\\mathcal{H}_s$, $\\mathcal{H}_t$, and $\\mathcal{H}_u$ and leads to\n\\begin{align}\n\\mathcal{H}^{ijkl}(s,t,u)&=\\delta^{ij}\\delta^{kl}\\mathcal{H}(s,t,u)+\\delta^{ik}\\delta^{jl}\\mathcal{H}(t,u,s)\\notag\\\\\n&+\\delta^{il}\\delta^{jk}\\mathcal{H}(u,s,t)\\,.\n\\end{align}\nThe isospin projection operators are defined as\n\\begin{align}\n\\delta^{ij}\\delta^{kl}&=\\frac{1}{3}\\left(\\P_0^{ijkl}-\\P_2^{ijkl}\\right)\\,,\\notag\\\\\n\\delta^{il}\\delta^{jk}&=\\frac{1}{2}\\left(\\P_1^{ijkl}+\\P_2^{ijkl}\\right)\\,,\\notag\\\\\n\\delta^{ik}\\delta^{jl}&=\\frac{1}{2}\\left(\\P_2^{ijkl}-\\P_1^{ijkl}\\right)\\,,\n\\label{eq:isospin_proj}\n\\end{align}\nwhich allow us to rewrite the isospin decomposition of the scalar amplitude according to\n\\begin{align}\n\\mathcal{H}^{ijkl}(s,t,u)&=\\P_0^{ijkl}\\mathcal{H}^0(s,t,u)+\\P_1^{ijkl}\\mathcal{H}^1(s,t,u)\\notag\\\\\n&+\\P_2^{ijkl}\\mathcal{H}^2(s,t,u)\\,,\n\\end{align}\nresulting in\n\\begin{align}\n\\mathcal{H}(s,t,u)&=\\frac{1}{3}\\left(\\mathcal{H}^0(s,t,u)-\\mathcal{H}^2(s,t,u)\\right)\\,,\\notag\\\\\n\\mathcal{H}(t,u,s)&=\\frac{1}{2}\\left(\\mathcal{H}^1(s,t,u)+\\mathcal{H}^2(s,t,u)\\right)\\,,\\notag\\\\\n\\mathcal{H}(u,s,t)&=\\frac{1}{2}\\left(\\mathcal{H}^2(s,t,u)-\\mathcal{H}^1(s,t,u)\\right)\\,.\n\\end{align}\nThe partial-wave expansion of the isospin amplitudes proceeds 
via~\\cite{Jacob:1959at}\n\\begin{equation}\n\\mathcal{H}^I(s,z_s)=\\sum_{\\ell=1}P_\\ell'(z_s)a_\\ell^I(s)\\,.\n\\end{equation}\nA fixed-$t$ dispersion relation for $\\mathcal{H}(s,t,u)$ now yields\n\\begin{align}\n\\mathcal{H}(s,t,u) &= P^t(s,u) + \\frac{s^n}{2\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd s' \\frac{\\text{disc}_s \\mathcal{H}(s',t,u(s'))}{s'^n(s'-s)}\\notag\\\\\n&\\qquad +\\frac{u^n}{2\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd u' \\frac{\\text{disc}_u \\mathcal{H}(s(u'),t,u')}{u'^n(u'-u)}\\,,\n\\end{align}\nwhere we can insert the discontinuities according to\n\\begin{align}\n\\text{disc}_s\\,\\mathcal{H}(s',t,u(s'))&=0\\,,\\notag\\\\\n\\text{disc}_u\\,\\mathcal{H}(s(u'),t,u')&=-\\frac{1}{2}{\\rm disc}\\, a_1^1(u')\\,,\n\\end{align}\nneglecting all discontinuities in partial waves with $\\ell \\geq 2$.\nIf we employ fixed-$s$ and -$u$ dispersion relations in strict analogy to the above and symmetrize the results, we find\n\\begin{equation}\n\\mathcal{H}(s,t,u)=\\mathcal{H}(t)-\\mathcal{H}(u)\\,,\n\\end{equation}\nwhere\n\\begin{equation}\n\\mathcal{H}(s)=P_{n-1}(s)+\\frac{s^n}{4\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd s' \\frac{{\\rm disc}\\, a_1^1(s')}{s'^n(s'-s)}\\,.\n\\end{equation}\nThe ambiguity of this decomposition is discussed in Sec.~\\ref{sec:rt_1mp}.\nNote that in order to consistently define the SVAs from the symmetrized version of $\\mathcal{H}(s,t,u)$, $n\\leq2$.\nThe same holds true in the cases of $1^{--}$ and $2^{++}$. \nFor the $0^{--}$ decays, \n$n\\leq 3$ subtractions can be implemented.\n\n\\section[2++ reconstruction theorem]{$2^{++}$ reconstruction theorem}\\label{app:2pp-RT}\n\nIn this appendix we derive the reconstruction theorem for $J^{PC}=2^{++}$, applying the approximations needed for our analysis.\nAgain we start with the scattering process\n\\begin{equation}\nT^i(p)\\pi^j(p_1)\\to \\pi^l(p_2)\\pi^k(p_3)\\,,\n\\end{equation}\nwhere the Mandelstam variables are defined as in Eq.~\\eqref{eq:mandelstam}.\nWe can write down the amplitude in terms of two scalar amplitudes $\\mathcal{B}$ and $\\mathcal{C}$~\\cite{Albaladejo:2019huw},\n\\begin{align}\n\\mathcal{M}^{ijkl}(s,t,u)&=-i\\sqrt{2}\\epsilon_{\\mu\\nu}K^\\mu \\left[(p_2+p_3)^\\nu \\mathcal{B}^{ijkl}(s,t,u) \\right.\\notag\\\\\n&\\left.\\qquad+(p_2-p_3)^\\nu \\mathcal{C}^{ijkl}(s,t,u)\\right]\\notag\\,,\\\\\nK_\\mu&=\\epsilon_{\\mu\\nu\\alpha\\beta}p_1^\\nu p_2^\\alpha p_3^\\beta\\,,\n\\label{eq:amplitude_a2}\n\\end{align}\nwhich depend on the isospin indices.\nThe isospin decomposition of $\\mathcal{B}$ and $\\mathcal{C}$ once more proceeds analogously to $\\pi\\pi$ scattering,\n\\begin{align}\n\\mathcal{B}^{ijkl}(s,t,u)&=\\delta^{ij}\\delta^{kl}\\mathcal{B}_s(s,t,u)+\\delta^{il}\\delta^{jk}\\mathcal{B}_t(s,t,u)\\notag\\\\\n&+\\delta^{ik}\\delta^{jl}\\mathcal{B}_u(s,t,u)\\notag\\,,\\\\\n\\mathcal{C}^{ijkl}(s,t,u)&=\\delta^{ij}\\delta^{kl}\\mathcal{C}_s(s,t,u)+\\delta^{il}\\delta^{jk}\\mathcal{C}_t(s,t,u)\\notag\\\\\n&+\\delta^{ik}\\delta^{jl}\\mathcal{C}_u(s,t,u)\\,.\n\\end{align}\nThe amplitude needs to be invariant under the symmetry transformations\n\\begin{align}\nk&\\leftrightarrow l \\,, & p_2 &\\leftrightarrow p_3 \\,, & t &\\leftrightarrow u\\,;\\notag\\\\\nj&\\leftrightarrow l\\,, & p_1 &\\leftrightarrow -p_2\\,, & s &\\leftrightarrow t\\,;\\notag\\\\\nj&\\leftrightarrow k\\, , & p_1 &\\leftrightarrow -p_3\\,, & s &\\leftrightarrow u\\,,\n\\end{align}\nwhich gives us the following relations between the different $\\mathcal{B}$ and $\\mathcal{C}$ 
functions:\n\\begin{align}\n\\mathcal{B}_t(s,t,u)&=-2\\mathcal{B}_s(t,s,u)-3\\mathcal{C}_t(s,t,u)\\,,\\notag\\\\\n\\mathcal{C}_t(s,t,u)&=\\mathcal{B}_t(s,t,u)+2\\mathcal{C}_s(t,s,u)\\,,\\notag\\\\\n\\mathcal{B}_u(s,t,u)&=-2\\mathcal{B}_s(u,t,s)+3\\mathcal{C}_u(s,t,u)\\,,\\notag\\\\\n\\mathcal{C}_u(s,t,u)&=2\\mathcal{C}_s(u,t,s)-\\mathcal{C}_u(s,t,u)\\,.\n\\end{align}\nWe find that $\\mathcal{B}_s$ is antisymmetric in the last two arguments, while $\\mathcal{C}_s$ is symmetric. This helps us to rewrite the amplitude in terms of $\\mathcal{B}_s$ and $\\mathcal{C}_s$\n\\begin{align}\n&\\mathcal{M}^{ijkl}(s,t,u)=-i\\sqrt{2}\\epsilon_{\\mu\\nu}K^\\mu\\notag \\\\\n&\\times\\Big\\{\\delta^{ij}\\delta^{kl}\\big[(p_2+p_3)^\\nu \\mathcal{B}_s(s,t,u)+(p_2-p_3)^\\nu \\mathcal{C}_s(s,t,u) \\big]\\notag\\\\\n&\\quad+\\delta^{ik}\\delta^{jl}\\big[-p_3^\\nu \\mathcal{B}_s(u,t,s)+(p_3+2p_2)^\\nu \\mathcal{C}_s(u,t,s) \\big]\\notag\\\\\n&\\quad+\\delta^{ik}\\delta^{jl}\\big[-p_2^\\nu \\mathcal{B}_s(t,s,u)-(p_2+2p_3)^\\nu \\mathcal{C}_s(t,s,u) \\big]\\Big\\}\\,,\n\\end{align}\nand we drop the subscript $s$ in the following.\nThe isospin projection operators are defined in Eq.~\\eqref{eq:isospin_proj}.\nThe corresponding isospin amplitude can be decomposed in analogy to Eq.~\\eqref{eq:amplitude_a2}:\n\\begin{align}\n\\mathcal{M}^I(s,t,u)&=-i\\sqrt{2}\\epsilon_{\\mu\\nu}K^\\mu \\left[(p_2+p_3)^\\nu \\mathcal{B}^{I}(s,t,u) \\right.\\notag\\\\\n&\\left.\\qquad+(p_2-p_3)^\\nu \\mathcal{C}^{I}(s,t,u)\\right] \\,.\n\\end{align}\nThis leads to the following relations including the scalar isospin amplitudes:\n\\begin{align}\n\\mathcal{B}(s,t,u)&=\\frac{1}{3}\\left[\\mathcal{B}^0(s,t,u)-\\mathcal{B}^2(s,t,u)\\right]\\,,\\notag\\\\\n\\mathcal{C}(s,t,u)&=\\frac{1}{3}\\left[\\mathcal{C}^0(s,t,u)-\\mathcal{C}^2(s,t,u)\\right]\\,,\\notag\\\\\n\\mathcal{B}(t,s,u)&=-\\frac{1}{2}\\left[\\mathcal{B}^1(s,t,u)+\\mathcal{B}^2(s,t,u)\\right.\\notag\\\\\n&\\qquad\\left.+\\mathcal{C}^1(s,t,u)+\\mathcal{C}^2(s,t,u)\\right]\\,,\\notag\\\\\n\\mathcal{C}(t,s,u)&=-\\frac{1}{2}\\left[3\\mathcal{B}^1(s,t,u)+3\\mathcal{B}^2(s,t,u)\\right.\\notag\\\\\n&\\qquad\\left.-\\mathcal{C}^1(s,t,u)-\\mathcal{C}^2(s,t,u)\\right]\\,,\\notag\\\\\n\\mathcal{B}(u,t,s)&=-\\frac{1}{2}\\left[\\mathcal{B}^2(s,t,u)-\\mathcal{B}^1(s,t,u)\\right.\\notag\\\\\n&\\qquad\\left.-\\mathcal{C}^2(s,t,u)+\\mathcal{C}^1(s,t,u)\\right]\\,,\\notag\\\\\n\\mathcal{C}(u,t,s)&=\\frac{1}{2}\\left[3\\mathcal{B}^2(s,t,u)-3\\mathcal{B}^1(s,t,u)\\right.\\notag\\\\\n&\\qquad\\left.+\\mathcal{C}^2(s,t,u)-\\mathcal{C}^1(s,t,u)\\right]\\,.\n\\end{align}\nParity enforces the helicity-0 amplitude to vanish. We are therefore left with helicity-1 and helicity-2 amplitudes, whose partial-wave expansions start at $P$- and $D$-waves, respectively. As for all decays studied in this article, we wish to constrain the problem to $P$-waves only. 
\nTo this end, we use the connection of the helicity-1 and -2 amplitudes to $\\mathcal{B}$ and $\\mathcal{C}$ from Ref.~\\cite{Albaladejo:2019huw},\n\\begin{align}\n\\mathcal{C}^{ijkl}(s,t,u)&=\\frac{1}{\\alpha(s,t,u)}\\mathcal{A}_2^{ijkl}(s,t,u)\\notag\\,,\\\\\n\\mathcal{B}^{ijkl}(s,t,u)&=\\frac{1}{\\beta(s,t,u)}\\left[\\mathcal{A}_1^{ijkl}(s,t,u) \\right.\\notag\\\\\n&\\qquad\\qquad\\left.\n-\\gamma(s,t,u)\\mathcal{C}^{ijkl}(s,t,u)\\right]\\,.\n\\end{align}\nThe only relevant kinematic factor $\\beta$ is defined as\n\\begin{align}\n&\\beta(s,t,u)\n=\\frac{\\lambda_T(s)\\lambda^{1/2}_\\pi(s)\\sin\\theta_s}{8\\sqrt{2s}M}\\,,\n\\end{align}\nwhere\n\\begin{align}\n\\lambda_T(s)&=\\lambda(s,M^2,M_\\pi^2)\\,,\\quad\n\\lambda_\\pi(s)=\\lambda(s,M_\\pi^2,M_\\pi^2)\\,.\n\\end{align}\nDue to the constraint to $P$-waves, the amplitude $\\mathcal{C}$ needs to vanish and $\\mathcal{B}$ is given entirely by $\\mathcal{A}_1$ and the kinematic factor $\\beta$. $\\mathcal{A}_1$ contains kinematic singularities, but has a well-defined partial-wave expansion for fixed isospin $I$:\n\\begin{align}\n\\mathcal{A}^I_\\lambda(s,t,u)&=\\sum_{j\\geq |\\lambda|}(2j+1)a^{\\lambda I}_j(s)d_{\\lambda 0}^j(z_s) \\notag\\\\\n&=\\sum_{j\\geq |\\lambda|}(2j+1)K_{j\\lambda}(s,t,u)\\hat{a}^{\\lambda I}_j(s)\\hat{d}_{\\lambda 0}^j(z_s) \\notag\\\\\n&= 3K_{11}(s,t,u)\\hat{a}^{1 I}_1(s)\\hat{d}_{1 0}^1(z_s) + \\ldots \\,, \\label{eq:PWE_general}\n\\end{align}\nwhere in particular\n\\begin{equation}\nK_{11}(s,t,u)=\\frac{1}{4\\sqrt{s}}\\sin\\theta_s \\lambda^{1/2}_\\pi(s)\\,,\n\\end{equation}\nand\nthe Wigner $d$-matrices are given by\n\\begin{align}\nd_{\\lambda 0}^j(z_s) &= \\hat{d}_{\\lambda 0}^j(z_s)\\sin^{|\\lambda|}\\theta_s\\,, \\quad\nd_{10}^1(z_s)=-\\frac{\\sin\\theta_s}{\\sqrt{2}}\\,.\n\\end{align}\nHere, $\\hat{a}^{\\lambda I}_j(s)$ are the partial-wave amplitudes, which are however not yet free of kinematic constraints; see below.\nWe use the relation between $\\mathcal{A}_1$ and $\\mathcal{B}$ to reduce $\\mathcal{B}$ to its leading partial wave. Neglecting all higher ones (denoted by ellipses), we find\n\\begin{align}\n\\mathcal{B}^I(s,t,u) &= \\frac{1}{\\beta(s,t,u)} \\mathcal{A}_1^I(s,t,u) + \\ldots \\notag\\\\\n&= \\frac{3}{\\beta(s,t,u)} K_{11}(s,t,u)\\hat{d}_{1 0}^1(z_s) \\hat{a}^{1I}_1(s) + \\ldots \\notag\\\\\n&= -\\frac{6M}{\\lambda_T(s)}\\hat{a}^{1I}_1(s) + \\ldots \\equiv -\\Tilde{a}^{1I}_1(s) + \\ldots\\,. \\label{eq:PWE}\n\\end{align}\nNote that a kinematic constraint needs to be enforced on $\\hat{a}^{1I}_1$ to cancel the zeros of $\\lambda_T(s)$. 
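The kinematic algebra behind the last step of Eq.~\eqref{eq:PWE} is compact enough to be verified symbolically; the following sketch is an illustration only (not part of the analysis code) and confirms that $3K_{11}(s,t,u)\hat{d}_{10}^1(z_s)/\beta(s,t,u)$ indeed reduces to $-6M/\lambda_T(s)$.
\begin{verbatim}
# Symbolic cross-check of 3*K11*dhat/beta = -6*M/lambda_T(s).
import sympy as sp

s, M, Mpi, theta = sp.symbols('s M M_pi theta', positive=True)

def kallen(a, b, c):
    return a**2 + b**2 + c**2 - 2*(a*b + b*c + c*a)

lam_T = kallen(s, M**2, Mpi**2)
lam_pi = kallen(s, Mpi**2, Mpi**2)

beta = lam_T * sp.sqrt(lam_pi) * sp.sin(theta) / (8 * sp.sqrt(2 * s) * M)
K11 = sp.sin(theta) * sp.sqrt(lam_pi) / (4 * sp.sqrt(s))
dhat = -1 / sp.sqrt(2)          # from d^1_{10}(z_s) = -sin(theta_s)/sqrt(2)

print(sp.simplify(3 * K11 * dhat / beta + 6 * M / lam_T))   # -> 0
\end{verbatim}
The zeros of $\lambda_T(s)$ at $s=(M\pm M_\pi)^2$ are precisely what this kinematic constraint on $\hat{a}^{1I}_1$ has to compensate.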
As a consequence, $\\tilde{a}^{1I}_1$ is now a partial wave free of any kinematic singularities and zeros, and therefore apt for a generalized Omn{\\`e}s representation.\nUsing that the $P$-waves are pure isospin $I=1$, we can give the relations for the discontinuities of $\\mathcal{B}$\n\\begin{align}\n\\text{disc}_s\\, \\mathcal{B}(s',t,u(s')) &= 0\\,,\\notag\\\\\n\\text{disc}_u\\, \\mathcal{B}(s(u'),t,u') &= -\\frac{1}{2} {\\rm disc}\\, \\Tilde{a}^{11}_1(u')\\,.\n\\end{align}\nWriting down a fixed-$t$ dispersion relation for $\\mathcal{B}$\n\\begin{align}\n\\mathcal{B}(s,t,u) &= P^t(s,u) + \\frac{s^n}{2\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd s' \\frac{\\text{disc}_s\\, \\mathcal{B}(s',t,u(s'))}{s'^n(s'-s)}\\notag\\\\\n&\\quad+\\frac{u^n}{2\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd u' \\frac{\\text{disc}_u\\, \\mathcal{B}(s(u'),t,u')}{u'^n(u'-u)}\\,,\n\\end{align}\nand inserting the expansion from Eq.~\\eqref{eq:PWE} into partial waves in fixed-$s$, -$t$, and -$u$ dispersion relations, we find\n\\begin{align}\n\\mathcal{B}(s,t,u) &= P^t(s,u) - \\frac{u^n}{4\\pi i}\\int_{4M_\\pi^2}^\\infty \\dd u' \\frac{{\\rm disc}\\, \\Tilde{a}_{1}^{11}(u')}{u'^n(u'-u)}\\,,\\notag\\\\\n\\mathcal{B}(s,t,u) &= P^u(s,t) + \\frac{t^n}{4 \\pi i}\\int_{4M_\\pi^2}^\\infty \\dd t' \\frac{{\\rm disc}\\, \\Tilde{a}_{1}^{11}(t')}{t'^n(t'-t)}\\,,\\notag\\\\\n\\mathcal{B}(s,t,u) &= P^s(t,u) + \\frac{t^n}{4 \\pi i}\\int_{4M_\\pi^2}^\\infty \\dd t' \\frac{{\\rm disc}\\, \\Tilde{a}_{1}^{11}(t')}{t'^n(t'-t)}\\notag\\\\\n&\\qquad - \\frac{u^n}{4 \\pi i}\\int_{4M_\\pi^2}^\\infty \\dd u' \\frac{{\\rm disc}\\, \\Tilde{a}_{1}^{11}(u')}{u'^n(u'-u)}\\,.\n\\end{align}\nSymmetrizing these equations yields\n\\begin{align}\n\\mathcal{B}(s,t,u)&= \\mathcal{B}(t)-\\mathcal{B}(u)\\,,\\notag\\\\\n\\mathcal{B}(s)&=P_{n-1}(s)+\\frac{s^n}{4 \\pi i} \\int_{4M_\\pi^2}^\\infty \\dd s' \\frac{{\\rm disc}\\, \\Tilde{a}_1^{11}(s')}{s'^n(s'-s)}\\,.\n\\end{align}\nThis is the same reconstruction theorem as for $1^{-+}$,\nwith, as a consequence, the same ambiguity discussed in the main text.\nNote that, as far as the necessary kinematic factors are concerned, the amplitude decomposition with $P$-waves only can be cross-checked with a Lagrangian-based calculation of $a_2\\to\\rho\\pi$~\\cite{Giacosa:2005bw}.\n\n\\section{Interference ring}\\label{app:interferencering}\n\n\\begin{figure*}\n \\hspace{-0.8cm}\n\t\\begin{subfigure}{0.33\\textwidth}\n\t \\fontsize{13pt}{14pt} \\selectfont\n \\scalebox{0.55}{\\input{plots/1--_small/dal_10.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.33\\textwidth}\n\t \\fontsize{13pt}{14pt} \\selectfont\n \\scalebox{0.55}{\\input{plots/1--_small/dal_11.0.tex}}\n\t\\end{subfigure}\n\t\\begin{subfigure}{0.33\\textwidth}\n\t \\fontsize{13pt}{14pt} \\selectfont\n \\scalebox{0.55}{\\input{plots/1--_small/dal_14.0.tex}}\n\t\\end{subfigure}\n\t\\caption{Logarithm of the intensity in the Dalitz plots for $J^{PC}=1^{--}$ with smaller $\\rho$ width, see \\ref{app:interferencering}. From left to right, we show the decay masses $M=10/11/14\\,M_\\pi$. The blue line is the solution of Eq.~\\eqref{eq:inter_param}.}\n\t\\label{fig:dalitz_1mm_inter}\n\\end{figure*}\n\nWhen studying the Dalitz plots of the $1^{--}$ decays, we find a remarkable property induced by the symmetry of the process: a ring-shaped local minimum in the logarithmic intensity that crosses all three intersection points of the $\\rho$ bands. However, this only becomes visible when using an unphysically narrow $\\rho$ resonance. We again (cf. 
\\ Sec.~\\ref{sec:results}) employ a phase shift from a simple Breit--Wigner model with the energy-dependent width from Ref.~\\cite{Ropertz:2018stk} and the nominal width $\\Gamma_\\rho=30\\,\\text{MeV}$; therefore the resonance bands in the Dalitz plots are much narrower. When looking at plots with $M^2>3(M_\\rho^2-M_\\pi^2)$, we obtain an interference ring that crosses all three intersection points of the three $\\rho$ bands. Transforming to the well-known Dalitz plot variables~\\cite{Dalitz:1953cp,Fabri:1954zz,Weinberg:1960zza}\n\\begin{align}\nx&=\\frac{\\sqrt{3}}{2MQ}(t-u)\\,,\\notag\\\\\ny&=\\frac{3}{2MQ}\\left((M-M_\\pi)^2-s\\right)-1\\,,\\notag\\\\\nQ&=M-3M_\\pi\n\\end{align}\nrenders the ring to a perfect circle. Using the three intersection points $s=t=M_\\rho^2$, $s=u=M_\\rho^2$, and $t=u=M_\\rho^2$, we obtain three equations of the form\n\\begin{equation}\n(x-x_c)^2+(y-y_c)^2-r^2=0\\,.\n\\end{equation}\nThese can be solved for the center and the radius of the circle in this parameterization:\n\\begin{align}\nx_c&=0\\,,\\qquad y_c=0\\,,\\notag\\\\\nr^2&=\\left(\\frac{3(M_\\rho^2-M_\\pi^2)-M^2}{MQ}\\right)^2\\,.\n\\end{align}\nReverting back to the Mandelstam variables, we find that the ring can be determined using the formula for the circle $x^2+y^2-r^2=0$ and solving for\n\\begin{align}\nt(s)&=\\frac{1}{2}\\left(M^2+3M_\\pi^2-s\\pm\\sqrt{M_1(s)M_2(s)}\\right)\\,,\\notag\\\\\nM_1(s)&=M^2+3M_\\pi^2-s-2M_\\rho^2\\notag\\,,\\\\\nM_2(s)&=M^2+3(M_\\pi^2+s-2M_\\rho^2)\\,,\\label{eq:inter_param}\n\\end{align}\nin the $s$ domain\n\\begin{align}\n-\\frac{M^2}{3}-M_\\pi^2+2M_\\rho^2\\leq s\\leq M^2+3M_\\pi^2-2M_\\rho^2\\,.\n\\end{align}\nThis allows us to plot the interference ring in the Dalitz plot (cf. \\ Fig.~\\ref{fig:dalitz_1mm_inter}). For large masses, a sizeable part of it is outside of the Dalitz plot or close to its boundary, where the phase space is small. Using our realistic parameterization for the $\\rho$ resonance, \nthis feature is washed out by the broadness of the $\\rho$ resonance.\n\n\\section{Rescattering effects for narrow resonances}\\label{app:narrow_res}\n\nWe assume that for a narrow-width resonance, the Omn{\\`e}s function behaves approximately like a Breit--Wigner parameterization~\\cite{Breit:1936zzb}\n\\begin{equation}\n\\Omega(s)=\\frac{M^2}{M^2-s-iM\\Gamma}\\,,\\label{eq:BW}\n\\end{equation}\nwhere the phase is given by\n\\begin{equation}\n\\delta(s)=\\arctan\\left(\\frac{M\\Gamma}{M^2-s}\\right)\\,.\\label{eq:BW_phase}\n\\end{equation}\nThis assumption is justified since a zero-width phase given by\n\\begin{equation}\n\\delta(s)=\\pi \\theta(s-M^2)\\,.\n\\end{equation}\nleads to \n\\begin{equation}\n\\Omega(s)=\\frac{M^2}{M^2-s}\\,,\n\\end{equation}\nwhich is a Breit--Wigner function with zero width.\nBy using Eqs.~\\eqref{eq:BW} and \\eqref{eq:BW_phase}, the phase-shift-dependent fraction inside the dispersion integral over the inhomogeneity in Eq.~\\eqref{eq:inhomOmnes-sol} reads\n\\begin{equation}\n\\frac{\\sin\\delta(s)}{|\\Omega(s)|}=\\frac{\\Gamma}{M}\\,.\n\\end{equation}\nThe general KT solution~\\eqref{eq:inhomOmnes-sol} therefore reduces to\n\\begin{align}\n\\mathcal{X}(s)&=\\Omega(s)\\Bigg(P_{n-1}(s)+\\frac{\\Gamma}{M}\\cdot\\frac{s^n}{\\pi}\\int_{4M_\\pi^2}^\\infty \\frac{\\dd s'}{s'^n}\\frac{\\widehat{\\mathcal{X}}(s')}{(s'-s)}\\Bigg)\\,,\n\\end{align}\nmaking it amply clear that all crossed-channel rescattering effects are suppressed in the limit of a narrow-width resonance for $\\Gamma\\to 0$. 
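The step from Eqs.~\eqref{eq:BW} and \eqref{eq:BW_phase} to $\sin\delta(s)/|\Omega(s)|=\Gamma/M$ can also be checked numerically; the short sketch below is an illustration only and does not rely on any of the analysis code (the numerical values merely mirror the narrow-width setup used above).
\begin{verbatim}
# Numerical check of sin(delta(s)) / |Omega(s)| = Gamma / M
# for the Breit-Wigner forms quoted above.
import numpy as np

def omega_bw(s, M, Gamma):
    return M**2 / (M**2 - s - 1j * M * Gamma)

def delta_bw(s, M, Gamma):
    # arctan2 keeps the phase on the branch rising from 0 to pi
    return np.arctan2(M * Gamma, M**2 - s)

M, Gamma = 0.775, 0.030                 # GeV
s = np.linspace(0.1, 2.0, 50)           # GeV^2
ratio = np.sin(delta_bw(s, M, Gamma)) / np.abs(omega_bw(s, M, Gamma))
print(np.allclose(ratio, Gamma / M))    # -> True
\end{verbatim}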
\n\n\\end{appendix}\n\n\\bibliographystyle{utphysmod}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"} +{"id": "red-arxiv-7", "source_id": "red-arxiv_7_red-arxiv-7", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction} \\label{chap:introduction}\n \n Time-series forecasting has become a central problem in machine learning research as most collected industrial data is being recorded over time. A specific application for time-series forecasting approaches is human motion prediction (or human pose forecasting), in which a multivariate time-series is given in the form of a human joint skeleton, and the objective is to forecast motion sequences based on the previous observations. This area has recently seen various applications ranging from healthcare \\cite{taylor2020intelligent} and smart homes \\cite{jalal2019wrist} to robotics \\cite{unhelkar2018human,liu2019deep}. Deep learning methods have shown state-of-the-art performances in the task of human motion prediction in recent years with a focus on popular time-series forecasting models including LSTM's and GRU's \\cite{martinez2017human}, temporal autoencoders \\cite{butepage2017deep}, and more recently transformer-based approaches \\cite{mao2020history,mao2021multi}. Moreover, employing graph-based models has shown to be advantageous in cases where the human joint skeleton can be utilized \\cite{li2020dynamic}.\n \n \\begin{figure}[t] \\label{fig:pred} \\centering\n \\includegraphics[width=0.042\\textwidth]{a.pdf} \\includegraphics[width=0.91\\textwidth]{walk_cropped.pdf}\\\\\n \n \\includegraphics[trim={0.1cm 5cm 0.1cm 1.750cm},clip,width=0.042\\textwidth]{b.pdf} \\includegraphics[width=0.91\\textwidth]{walk_arm.pdf}\\\\\n \n \\includegraphics[trim=0.1cm 3.5cm 0.1cm 1cm,clip,width=0.042\\textwidth]{c.pdf} \\includegraphics[width=0.91\\textwidth]{walk_legs.pdf}\\\\ \n \n \\caption{Examples of three motion prediction tasks with observations (red) and forecasts (teal). (a) being a standard motion prediction on the full graph, while (b) and (c) are forecasts based on only a subgraph of the sensor skeleton learned by the same model.}\n \\label{fig:motion}\n \\end{figure}\n \n In few-shot motion prediction, we strive to forecast the motion for previously unseen actions using only a few labeled examples, in contrast to standard human motion prediction, where the training dataset already contains sufficient samples for each action that will be encountered during testing. This can be highly beneficial in practice, as it eliminates the need for such a dataset and allows for a more flexible application. For example, end users can then add new motions by demonstrating an action a few times before the model can accurately classify and forecast future frames. Current approaches for motion prediction are limited to a fixed attribute space such that every observation needs to be recorded across the same set of input sensors. However, an ideal model should be able to cope with only a subset of motion sensors, as not every user should be required to have motion sensors for the full human skeleton. Also, not every action requires information from every possible sensor, e.g., recordings of only the arm for the motion \"waving.\" An example of this is shown in Figure~\\ref{fig:motion}, where a motion prediction for the complete human skeleton, but also partial subgraphs of it, is demonstrated. 
In few-shot learning, this setup is referred to as learning across tasks with heterogeneous attributes \\cite{iwata2020meta,brinkmeyer2022few} and is typically tackled by employing a model that operates on sets of attributes (in contrast to vectors), which inherently do not possess any order.\n \n In human motion prediction, the attributes represent sensors distributed on a human skeleton \\cite{h36m_pami,IonescuSminchisescu11}, meaning they possess order in the form of a graph structure. This information is often used in approaches for classical human motion prediction but not in the current literature for few-shot motion prediction. In a few-shot setting for tasks with heterogeneous sensors, the model would encounter varying graphs in training, similar to classical graph classification approaches \\cite{kipf2016semi}. In this scenario, each motion prediction task has a different set of sensors (attributes) that is shared across its subjects, each frame (or pose) corresponds to one time step, and, finally, the placement of the existing sensors on the subject's body is represented by the task's graph.\n In this work, we propose the first model for few-shot motion prediction that incorporates the underlying graph information while generalizing over tasks with heterogeneous sensors. We evaluate our approach on different variations of the popular Human3.6M dataset and demonstrate improvements over all related methods.\n The contributions of this work are the following:\n \n \\begin{enumerate}\n \\item We propose the first model for few-shot motion prediction that incorporates the underlying graph structure, while also being the first model for few-shot motion prediction that generalizes to motion tasks with heterogeneous sensors.\n \\item We conduct the first few-shot human motion experiments on tasks with heterogeneous sensors, where we show significant performance improvements over all related baselines, with performance lifts ranging from $10.4\\%$ to $39.3\\%$.\n \\item We demonstrate minor performance improvements over state-of-the-art approaches in the standard experimental setup while using two orders of magnitude fewer parameters in our model.\n\t \\item We also provide code for our method as well as for two of our baselines that have not published a working implementation.\n \\end{enumerate} \\section{Related Work} \\label{chap:relatedwork}\n\nThis work lies in the intersection of few-shot learning (FSL) and human motion prediction. Thus, we will discuss the related work of both areas before summarizing the work at their intersection. FSL \\cite{wang2020generalizing} aims to achieve good generalization on a novel task that contains only a few labeled samples, based on a large meta-dataset of related tasks. There are different techniques, including metric-based \\cite{snell2017prototypical}, gradient-based \\cite{finn2017model}, and memory-based approaches \\cite{yoon2019tapnet}, that have shown successful results. They typically all involve meta-training across the meta-dataset while performing some adaptation to the test task at hand.\n\nRecently, different works have tried to extend few-shot learning to generalize across tasks that vary in their input \\cite{brinkmeyer2019chameleon} or output space \\cite{drumond2020hidra}. One approach is to apply permutation-invariant and -equivariant models that operate on sets of elements through the use of deep sets \\cite{zaheer2017deep}. 
\\textsc{TimeHetNet}{} \\cite{brinkmeyer2022few} extended this approach to perform few-shot time-series forecasting on tasks with a single target variable and a varying amount of covariates. \\textsc{chameleon} \\cite{brinkmeyer2019chameleon} allows vector data-based tasks to have different shapes and semantics as long as the attributes can be mapped to a common alignment. All these methods, however, did not consider any structural relation between the attributes and operate purely on sets of scalar attributes.\n\nMotion Forecasting (or Pose Forecasting, or Pose Estimation) is the task of predicting the subsequent frames of a sequence of human poses. This data can be collected directly as images, or with accelerometers and gyroscopes \\cite{Parsaeifard_2021_ICCV}. Most approaches naturally rely on standard deep learning methods for time-series forecasting such as Variational Auto Encoders, LSTMs, and recurrent convolution networks \\cite{drumond2018peek,taylor2020intelligent,jalal2019wrist,liu2019deep,unhelkar2018human}. These methods are devised for different motion applications that vary in the type of sensors or forecasting length. For example, \\textsc{Peek} \\cite{drumond2018peek} and the work of Jalal et al. \\cite{jalal2019wrist} require only motion data from the arms, while Gui et al. \\cite{gui2018few} use the rotation of the main joints of the complete human body to predict future time-steps. None of these approaches, however, are designed to handle tasks where the set of motion sensors varies.\n\nThere are two recent approaches published for few-shot human prediction that we will focus on in this paper as baselines. \\textsc{paml} \\cite{gui2018few} consists of the popular meta-learning approach \\textsc{maml} \\cite{finn2017model} operating on top of the classical motion prediction model \\textsc{residual-sup} \\cite{martinez2017human}. It incorporates a simple look-ahead method for the decoder weights based on pre-trained weights on a bigger dataset to fine-tune the model for a new task. \\textsc{MoPredNet}~\\cite{zang2021few,zang2022few} is a memory-based approach that uses attention and an external memory of pretrained decoder weights to compute the weights for a new task. Although these two methods work with different tasks separated by the human action performed in each pose sequence, they require the same set of sensors for each task.\n \n In this paper, we present \\textsc{GraphHetNet}{} (\\textsc{GHN}): a graph-based approach to adapt the \\textsc{TimeHetNet}{} \\cite{brinkmeyer2022few} architecture to train across different human motion detection tasks with heterogeneous sensors by integrating information of neighboring sensors through the application of graph convolutional networks \\cite{kipf2016semi}. Thus, we can combine both graph and time-series information into our few-shot predictions.\n \n \n\n\n\n\n\n \\section{Methodology} \\label{chap:methodology}\n\n\\subsection{Problem definition}\n\nWe formulate few-shot motion prediction as a multivariate temporal graph problem. In standard human motion prediction, we are given a graph $\\mathcal{G}=(\\mathcal{V},A)$ as predictor data where the vertex set $\\mathcal{V}$ consists of $C$ motion sensors $\\{1,...,C\\}$ and $A\\in \\mathbb{R}^{C\\times C}$ is a symmetric adjacency matrix representing the edges between sensors with $A_{ij}=1$ iff sensors $i$ and $j$ are connected by an edge, e.g., an elbow and the shoulder. We also refer to this graph as motion graph, as it contains all the motion sensors. 
Additionally, we are given a set of node features $X=\\{x_{ict}\\}\\in \\mathbb{R}^{I\\times T \\times C}$ which represent a multivariate time-series with $I$ instances over $T$ time steps for the $C$ motion sensors. We want to forecast the next $H$ time steps given the observed $T$ such that our target is given by $Y\\in\\mathbb{R}^{I\\times H\\times C}$.\\\\\nExtending this formulation to few-shot learning, we are given a set of $M$ tasks $D:=\\{(D_1^s,D_1^q),...,(D_M^s,D_M^q)\\}$ called meta-dataset where each task consists of support data $D^s$ and query data $D^q$ with $D^s_m:=(\\mathcal{G}_m,X^s_m,Y^s_m)$ and $D^q_m:=(\\mathcal{G}_m,X^q_m,Y^q_m)$. The graph is shared across instances of both support and query for a given task. We want to find a model $\\phi$ with minimal expected forecasting loss over the query data of all tasks when given the labeled support data, and predictor of the query data:\n\\begin{equation}\n \\min_\\phi \\frac{1}{M} \\sum_{(D^s_m,D^q_m)\\in D} \\mathcal{L}(Y^q_m,\\phi(G_m,X^q_m,D^s))\n\\end{equation}\nIn the standard setting $\\mathcal{G}_m=\\mathcal{G}_{m'} \\ \\forall m,m'\\in M \\ (m\\neq m')$, which means that the structure of the graph $\\mathcal{G}$ does not vary across the meta-dataset. Thus, each sample of each task contains the same set of motion sensors $\\mathcal{V}$ with an identical adjacency matrix $A$. We want to generalize this problem to tasks with heterogeneous sensors, meaning that the underlying graph structure and the set of vertices vary across tasks ($\\mathcal{G}_m\\neq \\mathcal{G}_{m'} \\ \\forall m,m'\\in M\\ (m\\neq m')$), while it is shared between support and query data of the same task. Thus, the number of motion sensors $C$ is not fixed and depends on the task at hand.\n \n\\subsection{GraphHetNet}\n\n\\begin{figure}[t] \\label{fig:network} \\centering\n \\includegraphics[width=0.96\\textwidth]{network5.pdf}\n \\caption{The pipeline for our proposed approach \\textsc{GraphHetNet}. \\textit{DS Block} stands for Deep Set Block and \\textit{GCN Block} for Graph Convolution Network Block. The network takes the full support data $D^s=(\\mathcal{G},X^s,Y^s)$ and the predictors of the query data $(\\mathcal{G},X^q)$ and outputs a set of outputs $\\hat{y}$ represent the next $H$ frames after the $T$ frames of the instances in $D^q$. Batch dimensions are omitted for simplicity.}\n\\end{figure}\nOur model \\textsc{GraphHetNet}{} denoted by $\\phi$ is based on \\textsc{TimeHetNet}{} \\cite{brinkmeyer2022few}, which uses a set approach for few-shot time-series forecasting with heterogeneous attributes similar to the approach of Iwata et al. \\cite{iwata2020meta}. The overall architecture consists of two main components: First, the inference network, which processes the predictor and target data of the support set $D^s$ of a task to generate a latent task representation which should contain useful information to forecast the query instances. Second, the prediction network computes the actual motion forecast for the query set $D^q$ of the task at hand based on its predictors and the task embedding of the support network. In prior approaches \\cite{iwata2020meta,brinkmeyer2022few}, both components are composed of multiple stacked deep set blocks (\\textit{DS Block}), which process the input data as a set of attributes. 
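To make the task structure just defined concrete, the following is a minimal sketch of how a single task with heterogeneous sensors can be laid out in code. The class and helper names are ours and purely illustrative (they are not taken from the released implementation); only the tensor shapes follow the problem definition above.
\begin{verbatim}
# Minimal sketch of one few-shot motion task with heterogeneous sensors.
# "MotionTask" and "make_toy_task" are illustrative names, not the authors' API;
# shapes follow the problem definition: C sensors, T observed and H future frames.
from dataclasses import dataclass
import numpy as np

@dataclass
class MotionTask:
    A: np.ndarray          # (C, C) symmetric adjacency of the task's sensor graph
    X_support: np.ndarray  # (I_s, T, C) observed frames of the support instances
    Y_support: np.ndarray  # (I_s, H, C) future frames of the support instances
    X_query: np.ndarray    # (I_q, T, C) observed frames of the query instances

def make_toy_task(C=5, I_s=5, I_q=2, T=50, H=10, seed=0):
    rng = np.random.default_rng(seed)
    A = np.zeros((C, C))
    for c in range(C - 1):           # toy chain-shaped "skeleton"
        A[c, c + 1] = A[c + 1, c] = 1.0
    return MotionTask(A,
                      rng.standard_normal((I_s, T, C)),
                      rng.standard_normal((I_s, H, C)),
                      rng.standard_normal((I_q, T, C)))

task = make_toy_task()
print(task.A.shape, task.X_support.shape, task.Y_support.shape, task.X_query.shape)
\end{verbatim}
The deep set and graph convolution blocks described next operate directly on tensors of this form, with the number of sensors $C$ free to change from task to task.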
To compute the embeddings for every single vertex $c\\in C$ over the instances $I$ of the support data $D^s$, a single layer in such a block is then a deep set layer \\cite{zaheer2017deep}:\n\\begin{equation}\n w_c= g_{\\text{\\textsc{ds}}}\\left(\\frac{1}{I} \\sum_{i=1}^{I} f_{\\text{\\textsc{ds}}}(x_{ic})\\right) \\ \\forall c\\in C\n \\label{eq:dsb}\n\\end{equation}\nHere, $w_c\\in \\mathbb{R}^{T\\times K}$ with K being the latent output dimension of $g_{\\text{DS}}$. By employing an inner function $f_{\\text{\\textsc{ds}}}:\\mathbb{R}^{T\\times 1}\\rightarrow \\mathbb{R}^{T\\times K}$ on each element of the set of instances $X$, and an outer function $g_{\\text{\\textsc{ds}}}:\\mathbb{R}^{T\\times K}\\rightarrow \\mathbb{R}^{T\\times K}$ on the aggregation of this set, we can model a permutation-invariant layer that operates on the set of instances. The theoretical foundation of this layer lies in the Kolmogorov-Arnold representation theorem, which states that any multivariate continuous function can be written as a finite composition of continuous functions of a single variable and the binary operation of addition~\\cite{schmidt2021kolmogorov}.\n\nIn contrast to previous approaches that operate on heterogeneous attributes, we do not utilize \\textit{DS} blocks to aggregate the information across attributes, but only across instances, as our problem's attributes are motion sensors structured in a graph and not in a set. Instead, we include blocks of graph convolutional layers (\\textit{GCN Block}) \\cite{kipf2016semi} in both the inference and the prediction network. We can then aggregate information across sensors by stacking graph convolutional layers. A single layer in the block is then defined as:\n\\begin{equation}\n u_{ic}= g_{\\text{\\textsc{gcn}}}\\left(\\left[x_{ic},\\sum_{j\\in N(c)} f_{\\text{\\textsc{gcn}}}(x_{ij})\\right]\\right) \\ \\forall c\\in C \\ \\forall i\\in I\n\\end{equation}\nwhere $u_{ic}\\in\\mathbb{R}^{T\\times K}$, $N(c)$ is the set of all vertices that are in the neighborhood of $c$ meaning $A_{cj}=1$ for every $j\\in N(c)$ and $[.]$ is the concatenation along the latent feature axis. The inner function $f_{\\text{\\textsc{gcn}}}:\\mathbb{R}^{T\\times 1}\\rightarrow \\mathbb{R}^{T\\times K}$ prepares the neighbor embeddings, while the outer function $g_{\\text{\\textsc{gcn}}}:\\mathbb{R}^{T\\times 2K}\\rightarrow \\mathbb{R}^{T\\times K}$ updates the vertex features of the respective sensor with its aggregated neighbor messages. Note that this layer only captures the information across motion sensors, not instances. The models $g_{\\text{\\textsc{gcn}}},f_{\\text{\\textsc{gcn}}},g_{\\text{\\textsc{ds}}},f_{\\text{\\textsc{ds}}}$ are Gated recurrent units (GRU) to deal with the temporal information.\nAs shown in Figure \\ref{fig:network}, our full model \\textsc{GraphHetNet}{} $\\phi$ consists of the two model components inference and prediction network. The inference network $\\psi_{\\text{inf}}$ processes the full support data $D^s$ to compute the task embeddings across instances and motion sensors. The prediction network $\\psi_{\\text{pred}}$ processes the query data to output the final forecast. 
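To illustrate the two layer types, the following PyTorch sketch is our own simplified rendering of the equations for $w_c$ and $u_{ic}$; it is not the released implementation, and the chosen tensor layout (instances, time, sensors) as well as the class names are assumptions made here for readability.
\begin{verbatim}
# Simplified sketch (ours) of a deep set layer over instances and a graph
# convolution layer over sensors, both built from GRUs as in the text.
import torch
import torch.nn as nn

class DeepSetLayer(nn.Module):
    """One deep set layer over instances: w_c = g(mean_i f(x_ic))."""
    def __init__(self, in_dim=1, k=64):
        super().__init__()
        self.f = nn.GRU(in_dim, k, batch_first=True)  # inner function f_DS
        self.g = nn.GRU(k, k, batch_first=True)       # outer function g_DS

    def forward(self, x):                             # x: (I, T, C)
        I, T, C = x.shape
        xc = x.permute(0, 2, 1).reshape(I * C, T, 1)  # one sequence per (i, c)
        h, _ = self.f(xc)                             # (I*C, T, K)
        h = h.reshape(I, C, T, -1).mean(dim=0)        # average over instances
        w, _ = self.g(h)                              # (C, T, K): one embedding per sensor
        return w

class GCNLayer(nn.Module):
    """One graph convolution: u_ic = g([x_ic, sum_{j in N(c)} f(x_ij)])."""
    def __init__(self, in_dim=1, k=64):
        super().__init__()
        self.f = nn.GRU(in_dim, k, batch_first=True)      # neighbour messages
        self.g = nn.GRU(in_dim + k, k, batch_first=True)  # vertex update

    def forward(self, x, A):                          # x: (I, T, C), A: (C, C) float
        I, T, C = x.shape
        xc = x.permute(0, 2, 1).reshape(I * C, T, 1)
        msg, _ = self.f(xc)
        msg = msg.reshape(I, C, T, -1)
        agg = torch.einsum('cj,ijtk->ictk', A, msg)   # sum over neighbours j of c
        inp = torch.cat([x.permute(0, 2, 1).unsqueeze(-1), agg], dim=-1)
        out, _ = self.g(inp.reshape(I * C, T, -1))
        return out.reshape(I, C, T, -1).permute(0, 2, 1, 3)   # (I, T, C, K)

x = torch.randn(5, 50, 8)                             # I=5 instances, T=50 frames, C=8 sensors
A = torch.zeros(8, 8)
for c in range(7):                                    # toy chain graph
    A[c, c + 1] = A[c + 1, c] = 1.0
print(DeepSetLayer()(x).shape, GCNLayer()(x, A).shape)  # (8, 50, 64), (5, 50, 8, 64)
\end{verbatim}
In the full model, the inference and the prediction network stack blocks of such layers, with the GRUs carrying the temporal information, so the query forecast is produced from exactly these kinds of per-sensor embeddings.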
Thus, the prediction $\\hat{Y}$ of our model for a task $m$ is given by:\n\\begin{equation}\n \\hat{Y}_m=\\phi(X^q_m,G^q_m,D^s) = \\psi_{\\text{pred}}(X^q_m,G^q_m,\\psi_{\\text{inf}}(G^s_m,X^s_m,Y^s_m))\n\\end{equation}\nThe inference model $\\psi_{\\text{inf}}$ is composed of a \\textit{GCN} block in between two \\textit{DS} blocks to capture both information across instances and motion sensors. The prediction network $\\psi_{\\text{pred}}$ consists of a \\textit{GCN} block, followed by a block of stacked GRU layers (\\textit{GRU Block}) which compute the target motion forecast.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \\section{Results} \\label{chap:experiments}\n\nWe conducted multiple experiments on the Human3.6M dataset \\cite{h36m_pami,IonescuSminchisescu11}, which consists of 17 motion categories recorded for 11 subjects, resulting in 3.6 million frames in total. We want to evaluate our approach for few-shot motion tasks with heterogeneous sensors such that each task contains a subset of the vertices of the full motion graph, with the graph of each task being an induced subgraph of the original one. We also conduct an ablation on the standard few-shot motion prediction setting proposed in prior approaches \\cite{gui2018few,zang2021few,zang2022few} that considers homogeneous tasks only, meaning each task contains all sensors of the Human3.6M in identical order.\n\n\\subsection{Experimental setup}\nIn both cases, we have 11 actions in meta-training (directions, greeting, phoning, posing, purchases, sitting, sitting down, taking a photo, waiting, walking a dog, and walking together) and 4 actions in meta-testing (walking, eating, smoking, and discussion). Furthermore, we also utilize the same split across subjects for meta-test and meta-training as proposed by Gui et al. \\cite{gui2018few}. The task is to forecast the next 10 frames (400ms) given the previous 50 frames (2 seconds) across the given set of sensors. A single task consists of five support instances and two query instances which means that the model needs to adapt to a previously unseen action based on five labeled instances only. During meta-training, each meta-batch consists of one task per action in meta-training totaling 11 tasks. The tasks in the classical setting contain all nonzero angles for each of the 32 joints totaling 54 angles as motion sensors. In our main experiment on heterogeneous sensors, each task has only a subset of the set of all sensors. In particular, we sample an induced subgraph of the original human skeleton graph by selecting a random sensor as the initial root node and then recursively adding a subset of neighboring vertices to the graph, including all edges whose endpoints are both in the current subset. The statistics of the original motion graph of Human3.6M and our sampled induced subgraphs are given in Table~\\ref{tab:stats}. The number of unique tasks we sample during our experiments is enormous as is the number of possible induced subgraphs from a given source graph. We evaluated this empirically by sampling one million tasks from the full graph and found around 842,872 unique tasks, meaning only around $16\\%$ of the subgraphs were sampled more than once. 
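The induced-subgraph sampling just described can be sketched in a few lines; the function below is our own illustrative re-implementation, and the acceptance probability as well as the optional node cap are assumptions rather than values taken from our pipeline.
\begin{verbatim}
# Illustrative induced-subgraph sampler (ours): start from a random root sensor
# and repeatedly add a random subset of the neighbours of the current vertex set;
# the induced adjacency keeps every edge whose endpoints are both selected.
import numpy as np

def sample_induced_subgraph(A, rng, p_add=0.5, max_nodes=None):
    C = A.shape[0]
    selected = {int(rng.integers(C))}                 # random root sensor
    frontier = set(selected)
    while frontier:
        candidates = {int(j) for c in frontier
                      for j in np.flatnonzero(A[c]) if int(j) not in selected}
        frontier = {j for j in candidates if rng.random() < p_add}
        if max_nodes is not None and len(selected) + len(frontier) > max_nodes:
            frontier = set(list(frontier)[: max_nodes - len(selected)])
        selected |= frontier
    nodes = sorted(selected)
    return nodes, A[np.ix_(nodes, nodes)]             # induced adjacency

rng = np.random.default_rng(0)
A = np.zeros((6, 6))
for i, j in [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]:
    A[i, j] = A[j, i] = 1.0
nodes, A_sub = sample_induced_subgraph(A, rng)
print(nodes, A_sub.shape)
\end{verbatim}
Sampling in this way, repeated subgraphs are rare, which is reflected in the uniqueness statistic quoted above.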
This guarantees that many tasks our model encounters during meta-testing are previously unseen.\n\n\\begin{table}[t]\n\\begin{tabular}{p{2cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.4cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}}\n\\multicolumn{1}{l|}{} & & walking & & \\multicolumn{1}{l|}{} & & \\multicolumn{1}{l|}{} & & smoking & & \\multicolumn{1}{l|}{} & \\\\\n\\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & Avg & \\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & Avg \\\\ \\cline{1-6} \\cline{8-12} \n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{single}}$ \\cite{martinez2017human}} & 0.65 & 1.15 & 2.06 & \\multicolumn{1}{l|}{2.40} & 1.57 & \\multicolumn{1}{l|}{} & 0.76 & 1.17 & 1.99 & \\multicolumn{1}{l|}{2.02} & 1.48 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{all}}$ \\cite{martinez2017human}} & 0.88 & 1.11 & 1.17 & \\multicolumn{1}{l|}{1.20} & 1.09 & \\multicolumn{1}{l|}{} & 1.47 & 1.69 & 1.14 & \\multicolumn{1}{l|}{1.4} & 1.43 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{trans}}$ \\cite{martinez2017human}} & 0.85 & 1.18 & 1.19 & \\multicolumn{1}{l|}{1.17} & 1.09 & \\multicolumn{1}{l|}{} & 1.10 & 1.47 & 1.73 & \\multicolumn{1}{l|}{1.94} & 1.56 \\\\\n\\multicolumn{1}{l|}{\\textsc{paml} \\cite{gui2018few}} & 0.26 & 0.39 & 0.56 & \\multicolumn{1}{l|}{0.64} & 0.46 & \\multicolumn{1}{l|}{} & 0.58 & 0.64 & 0.69 & \\multicolumn{1}{l|}{0.83} & 0.69 \\\\\n\\multicolumn{1}{l|}{\\textsc{TimeHet} \\cite{brinkmeyer2022few}} & {\\ul 0.23} & {\\ul 0.30} & 0.44 & \\multicolumn{1}{l|}{0.53} & {\\ul 0.37} & \\multicolumn{1}{l|}{} & {\\ul 0.49} & {\\ul 0.52} & 0.58 & \\multicolumn{1}{l|}{0.62} & 0.55 \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} \\cite{zang2021few,zang2022few}} & 0.26 & 0.33 & {\\ul 0.43} & \\multicolumn{1}{l|}{{\\ul 0.52}} & 0.39 & \\multicolumn{1}{l|}{} & 0.51 & {\\ul 0.52} & {\\ul 0.54} & \\multicolumn{1}{l|}{{\\ul 0.61} } & {\\ul 0.54 } \\\\\n\\multicolumn{1}{l|}{\\textsc{GHN} (ours)} & \\textbf{0.17} & \\textbf{0.22} & \\textbf{0.30} & \\multicolumn{1}{l|}{\\textbf{0.37}} & \\textbf{0.27} & \\multicolumn{1}{l|}{} & \\textbf{0.41} & \\textbf{0.42} & \\textbf{0.43} & \\multicolumn{1}{l|}{\\textbf{0.48}} & \\textbf{0.44}\\\\\n\\cline{1-6} \\cline{8-12} \n\n\\multicolumn{1}{l|}{Lift in \\%} & 26.1 & 26.7 & 30.2 & \\multicolumn{1}{l|}{28.8} & 27.0 & \\multicolumn{1}{l|}{} & 16.3 & 19.2 & 20.4 & \\multicolumn{1}{l|}{21.3} & 18.5 \\\\ \n & & & & & & & & & & & \\\\\n\\multicolumn{1}{l|}{} & & discussion & & \\multicolumn{1}{l|}{} & & \\multicolumn{1}{l|}{} & & eating & & \\multicolumn{1}{l|}{} & \\\\\n\\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & & \\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & \\\\ \\cline{1-6} \\cline{8-12} \n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{single}}$ \\cite{martinez2017human}} & 0.97 & 1.56 & 1.86 & \\multicolumn{1}{l|}{2.67} & 1.77 & \\multicolumn{1}{l|}{} & 0.55 & 0.93 & 1.54 & \\multicolumn{1}{l|}{1.74} & 1.19 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{all}}$ \\cite{martinez2017human}} & 0.96 & 1.11 & 1.30 & \\multicolumn{1}{l|}{1.44} & 1.2 & \\multicolumn{1}{l|}{} & 0.85 & 1.03 & 0.92 & \\multicolumn{1}{l|}{1.05} & 0.96 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{trans}}$ \\cite{martinez2017human}} & 1.30 & 1.42 & 1.68 & \\multicolumn{1}{l|}{1.75} & 1.53 & \\multicolumn{1}{l|}{} & 0.68 & 0.78 & 0.94 & \\multicolumn{1}{l|}{1.03} & 0.86 \\\\\n\\multicolumn{1}{l|}{PAML 
\\cite{gui2018few}} & 0.35 & 0.52 & 0.78 & \\multicolumn{1}{l|}{0.91} & 0.64 & \\multicolumn{1}{l|}{} & 0.23 & 0.28 & 0.42 & \\multicolumn{1}{l|}{0.56} & 0.37 \\\\\n\\multicolumn{1}{l|}{\\textsc{TimeHet} \\cite{brinkmeyer2022few}} & {\\ul 0.29} & {\\ul 0.42} & 0.69 & \\multicolumn{1}{l|}{0.85} & 0.59 & \\multicolumn{1}{l|}{} & {\\ul 0.20} & {\\ul 0.28} & 0.41 & \\multicolumn{1}{l|}{{\\ul 0.52}} & {\\ul 0.35 } \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} \\cite{zang2021few,zang2022few}} & 0.34 & {\\ul 0.42} & {\\ul 0.62} & \\multicolumn{1}{l|}{{\\ul 0.77}} & {\\ul 0.54} & \\multicolumn{1}{l|}{} & 0.25 & 0.29 & {\\ul 0.39} & \\multicolumn{1}{l|}{0.59} & 0.38 \\\\\n\\multicolumn{1}{l|}{\\textsc{GHN} (ours)} & \\textbf{0.22} & \\textbf{0.30} & \\textbf{0.55} & \\multicolumn{1}{l|}{\\textbf{0.69}} & \\textbf{0.44} & \\multicolumn{1}{l|}{} & \\textbf{0.14} & \\textbf{0.17} & \\textbf{0.25} & \\multicolumn{1}{l|}{\\textbf{0.34}} & \\textbf{0.22}\\\\\n\\cline{1-6} \\cline{8-12} \n\n\\multicolumn{1}{l|}{Lift in \\%} & 24.1 & 28.6 & 11.3 & \\multicolumn{1}{l|}{10.4} & 18.5 & \\multicolumn{1}{l|}{} & 30.0 & 39.3 & 35.9 & \\multicolumn{1}{l|}{34.6} & 37.1\n\\end{tabular}\n\\\\\n\n\\caption{Results few-shot motion prediction with heterogenous sensors given in Mean Angle Error of different methods on Human3.6M. Best results are in bold, second best underlined. The percentage improvement is given for our model compared to the respective second best one.}\n\\label{table:resultsSub}\n\\end{table}\n \n\\begin{figure}[t] \\centering\n \\includegraphics[width=1\\textwidth]{line_forecast.pdf}\n \\caption{Examples of three motion predictions in exponential map for \\textsc{GHN} and baseline approaches. We sampled two examples where our approach (red) has the lowest error and one where a baseline performs best.}\n \\label{fig:liney}\n\\end{figure}\n\nWe compare against three non-meta-learning baselines, which are variations of the popular detection network \\textsc{residual-sup} \\cite{martinez2017human}, which consists of stacked GRU's with residual skip connections: $\\text{\\textsc{res-sup}}_{single}$ trains the model on the support data of the test task at hand only while evaluating the query data. $\\text{\\textsc{res-sup}}_{all}$ trains the model on the data of all the meta-training actions in standard supervised fashion. In the case of the heterogeneous tasks, the sensor dimension is padded with zeros to 54 since the model is not equipped to deal with heterogeneous sensor sets. The query data of the meta-test tasks is used to evaluate the final performance. $\\text{\\textsc{res-sup}}_{trans}$ uses $\\text{\\textsc{res-sup}}_{all}$ as a pretrained model to then fine-tune it to the support data of the test task at hand before evaluating the query data of it.\nFurthermore, we compare against the few-shot motion baselines \\textsc{paml} \\cite{gui2018few}, and \\textsc{MoPredNet} \\cite{zang2021few,zang2022few}, which both evaluate their approach in the homogeneous setup, as well as \\textsc{TimeHetNet}{} \\cite{brinkmeyer2022few} as it is the first model for time-series forecasting across heterogeneous attributes. Both \\textsc{paml} and \\textsc{MoPredNet} do not have any publicized code (and we could not reach the authors about it), which is why the results for the standard setting are taken from their respective published results. At the same time, we re-implemented both models to evaluate them on the heterogeneous setup. For \\textsc{TimeHetNet}{}, we utilize the officially published code. 
We had to adapt it as the original model is built to forecast a single target variable given a set of covariates that span the future time horizon. In contrast, we want to forecast multiple variables in a set without any given future covariates. The adapted version of \\textsc{TimeHetNet}{}, as well as the reimplementations of \\textsc{paml} and MoPredNet, can also be found in our link: \\url{https://github.com/brinkL/graphhetnet}.\nWe optimized the hyperparameters of all models via grid search. For our approach, the best found configuration includes two graph convolutional layers per \\textit{GCN} block, the \\textit{DS} blocks contain three stacked GRUs each, and the number of units per GRU is 64. We optimize our model with Adam and a learning rate of 0.0001.\n\n\\begin{table}[!t]\n \\begin{minipage}{.45\\linewidth}\n \n \\centering\n \\begin{tabular}{c|c|c}\n & Full & Sampled \\\\ \\hline\n vertices & $54$ & $26.8\\pm 12.9$ \\\\\n edges per vertex & $6.6\\pm 3.1$ & $3.9\\pm 1.7$ \n \\end{tabular}\n \\vspace{1em}\n \\caption{Statistics of the full Human3.6M graph and for the subgraphs sampled during training on tasks with heterogeneous attributes.}\n \\label{tab:stats}\n \\end{minipage}\\hspace{.09\\linewidth}\n\\begin{minipage}{.45\\linewidth}\n \\centering\n \\begin{tabular}{c|c|c|c|c}\n & PAML & MoPred & \\textsc{THN} & \\textsc{GHN} \\\\ \\hline\n Param. & 3,373K & 40,945K & 661K & 265K \\\\ \n\\end{tabular}\n \\\\ ~ \n\n \\caption{Number of parameters in the models PAML, MoPred, \\textsc{TimeHetNet}{} (as \\textsc{THN}), and \\textsc{GraphHetNet}{} (as \\textsc{GHN}, ours) in multiples of 1000.\n }\n \\label{tab:sizes}\n \\end{minipage} \n\\end{table} \n\\subsection{Results}\n\\begin{figure}[t!] \\label{network} \\centering\n \\includegraphics[width=.68\\textwidth]{hm_replacement_small.pdf}\n \\caption{Each line represents a model evaluated for tasks up to a certain number of sensors in test, while the x-axis shows the maximum number of sensors in meta-training. Results are given in MSE averaged across the normalized results for each action.}\n \\label{fig:heat}\n\\end{figure}\nThe results for our experiment on few-shot tasks with heterogeneous sensors are shown in Table~\\ref{table:resultsSub}. Our approach outperforms all baselines with significant margins over all actions and time horizons. The performance improvements compared to the respective second best approach range from 10.4 percent for 400ms on the action \"\\textit{discussion}\" to 39.3 percent for the motion prediction at 160ms for the action \"\\textit{eating}.\" The second-best results are shared between \\textsc{TimeHetNet}{} and \\textsc{MoPredNet} (abbreviated \\textsc{MoPred} in the table). Three examples for motion forecasts of this experiment are given in the Figure~\\ref{fig:liney} for two tasks where our approach has the highest performance and one task where a baseline approach performs better. As expected, the motion prediction of \\textsc{GraphHetNet}{} is most similar to \\textsc{TimeHetNet}{} with our method being more accurate. When comparing the model capacity of our approach and the analyzed baselines based on the model parameters illustrated in Table~\\ref{tab:sizes}, one can see that our model contains significantly fewer parameters, with two magnitudes difference to \\textsc{MoPredNet} \\cite{zang2022few}. 
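Parameter counts such as those in Table~\ref{tab:sizes} can be reproduced for any PyTorch model with a one-line helper; the snippet below is a generic illustration of ours rather than part of the benchmark code, and the example module is only loosely inspired by the GRU blocks described above.
\begin{verbatim}
# Generic helper (ours) to count trainable parameters of a torch.nn.Module.
import torch.nn as nn

def count_parameters(model: nn.Module) -> int:
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# e.g. a stack of three GRU layers with 64 hidden units, loosely resembling
# one of the blocks described above (not an exact reconstruction of the model)
block = nn.GRU(input_size=1, hidden_size=64, num_layers=3, batch_first=True)
print(f"{count_parameters(block) / 1e3:.0f}K trainable parameters")
\end{verbatim}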
The closest baseline is \\textsc{TimeHetNet}{} with still double the number of parameters.\n\n\\begin{table}[t]\n\\begin{tabular}{p{2cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.4cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}p{0.8cm}}\n\\multicolumn{1}{l|}{} & & walking & & \\multicolumn{1}{l|}{} & & \\multicolumn{1}{l|}{} & & smoking & & \\multicolumn{1}{l|}{} & \\\\\n\\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & Avg & \\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & Avg \\\\ \\cline{1-6} \\cline{8-12} \n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{single}}$ \\cite{martinez2017human}$*$} & 0.39 & 0.69 & 0.97 & \\multicolumn{1}{l|}{1.08} & 0.78 & \\multicolumn{1}{l|}{} & 0.27 & 0.50 & 0.98 & \\multicolumn{1}{l|}{1.00} & 0.69 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{all}}$ \\cite{martinez2017human}*} & 0.36 & 0.61 & 0.84 & \\multicolumn{1}{l|}{0.95} & 0.69 & \\multicolumn{1}{l|}{} & {\\ul 0.26} & 0.49 & 0.98 & \\multicolumn{1}{l|}{0.97} & 0.68 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{trans}}$ \\cite{martinez2017human}*} & 0.34 & 0.57 & 0.78 & \\multicolumn{1}{l|}{{\\ul 0.89}} & 0.65 & \\multicolumn{1}{l|}{} & {\\ul 0.26} & 0.48 & 0.93 & \\multicolumn{1}{l|}{0.91} & 0.65 \\\\\n\\multicolumn{1}{l|}{\\textsc{paml} \\cite{gui2018few}*} & 0.4 & 0.69 & 0.97 & \\multicolumn{1}{l|}{1.08} & 0.79 & \\multicolumn{1}{l|}{} & 0.34 & 0.63 & 1.13 & \\multicolumn{1}{l|}{1.12} & 0.80 \\\\\n\\multicolumn{1}{l|}{\\textsc{TimeHet} \\cite{brinkmeyer2022few}} & 0.32 & {\\ul 0.37} & 0.70 & \\multicolumn{1}{l|}{0.94} & 0.58 & \\multicolumn{1}{l|}{} & 0.43 & 0.46 & {\\ul 0.69} & \\multicolumn{1}{l|}{{\\ul 0.68}} & {\\ul 0.57} \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} (reimp.)} & 0.42 & 0.52 & 0.77 & \\multicolumn{1}{l|}{0.98} & 0.67 & \\multicolumn{1}{l|}{} & 0.48 & 0.54 & 0.71 & \\multicolumn{1}{l|}{0.94} & 0.67 \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} \\cite{zang2021few,zang2022few}*} & {\\ul 0.21} & \\textbf{0.35} & \\textbf{0.55} & \\multicolumn{1}{l|}{\\textbf{0.69}} & \\textbf{0.45} & \\multicolumn{1}{l|}{} & {\\ul 0.26} & {\\ul 0.47} & 0.93 & \\multicolumn{1}{l|}{0.9} & 0.64 \\\\\n\\multicolumn{1}{l|}{\\textsc{ghn} (ours)} & \\textbf{0.17} & \\textbf{0.35} & {\\ul 0.69} & \\multicolumn{1}{l|}{0.94} & {\\ul 0.54} & \\multicolumn{1}{l|}{} & \\textbf{0.12} & \\textbf{0.17} & \\textbf{0.67} & \\multicolumn{1}{l|}{\\textbf{0.54}} & \\textbf{0.38} \\\\ \n\\cline{1-6} \\cline{8-12} \n\n\\multicolumn{1}{l|}{Lift in \\%} & 19.0 & 0.0 & -25.5 & \\multicolumn{1}{l|}{-36.2} & -20.0 & \\multicolumn{1}{l|}{} & 53.8 & 63.8 & 28.0 & \\multicolumn{1}{l|}{40.0} & 40.6 \\\\ \n & & & & & & & & & & & \\\\\n\\multicolumn{1}{l|}{} & & discussion & & \\multicolumn{1}{l|}{} & & \\multicolumn{1}{l|}{} & & eating & & \\multicolumn{1}{l|}{} & \\\\\n\\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & & \\multicolumn{1}{l|}{} & 80 & 160 & 320 & \\multicolumn{1}{l|}{400} & \\\\ \\cline{1-6} \\cline{8-12} \n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{single}}$ \\cite{martinez2017human}*} & 0.32 & 0.66 & 0.95 & \\multicolumn{1}{l|}{1.09} & 0.76 & \\multicolumn{1}{l|}{} & 0.28 & 0.50 & 0.77 & \\multicolumn{1}{l|}{0.91} & 0.62 \\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{all}}$ \\cite{martinez2017human}$*$} & 0.31 & 0.66 & 0.94 & \\multicolumn{1}{l|}{{\\ul 1.03}} & 0.74 & \\multicolumn{1}{l|}{} & 0.26 & 0.46 & 0.70 & \\multicolumn{1}{l|}{0.82} & 0.56 
\\\\\n\\multicolumn{1}{l|}{$\\text{\\textsc{res-sup}}_{\\text{trans}}$ \\cite{martinez2017human}$*$} & 0.30 & 0.65 & {\\ul 0.91} & \\multicolumn{1}{l|}{0.99} & 0.71 & \\multicolumn{1}{l|}{} & 0.22 & 0.35 & 0.54 & \\multicolumn{1}{l|}{\\textbf{0.69}} & 0.45 \\\\\n\\multicolumn{1}{l|}{\\textsc{paml} \\cite{gui2018few}*} & 0.36 & 0.72 & 1.03 & \\multicolumn{1}{l|}{1.15} & 0.82 & \\multicolumn{1}{l|}{} & 0.29 & 0.51 & 0.8 & \\multicolumn{1}{l|}{0.95} & 0.64 \\\\\n\\multicolumn{1}{l|}{\\textsc{TimeHet} \\cite{brinkmeyer2022few}} & 0.33 & {\\ul 0.49} & 1.00 & \\multicolumn{1}{l|}{1.31} & 0.78 & \\multicolumn{1}{l|}{} & 0.28 & 0.35 & 0.61 & \\multicolumn{1}{l|}{0.91} & 0.54 \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} (reimp.)} & 0.51 & 0.67 & 0.99 & \\multicolumn{1}{l|}{1.12} & 0.82 & \\multicolumn{1}{l|}{} & 0.35 & 0.47 & 0.62 & \\multicolumn{1}{l|}{0.83} & 0.56 \\\\\n\\multicolumn{1}{l|}{\\textsc{MoPred} \\cite{zang2021few,zang2022few}*} & {\\ul 0.29} & 0.63 & \\textbf{0.89} & \\multicolumn{1}{l|}{\\textbf{0.98}} & \\textbf{0.70} & \\multicolumn{1}{l|}{} & {\\ul 0.21} & {\\ul 0.34} & {\\ul 0.53} & \\multicolumn{1}{l|}{\\textbf{0.69}} & {\\ul 0.44} \\\\\n\\multicolumn{1}{l|}{\\textsc{ghn} (ours)} & \\textbf{0.19} & \\textbf{0.42} & 0.94 & \\multicolumn{1}{l|}{1.25} & \\textbf{0.70} & \\multicolumn{1}{l|}{} & \\textbf{0.17} & \\textbf{0.29} & \\textbf{0.52} & \\multicolumn{1}{l|}{{\\ul 0.75}} & \\textbf{0.43}\\\\\n\\cline{1-6} \\cline{8-12} \n\n\\multicolumn{1}{l|}{Lift in \\%} & 34.5 & 33.3 & -5.6 & \\multicolumn{1}{l|}{-27.6} & 0.0 & \\multicolumn{1}{l|}{} & 19.0 & 14.7 & 1.9 & \\multicolumn{1}{l|}{-8.7} & 2.3\n\\end{tabular}\n\\\\\n\n\\caption{Mean Angle Error of different methods on Human3.6M dataset for standard few-shot motion prediction task with fixed attribute space. The results with * are taken from the published results of Zang et al. \\cite{zang2022few}.}\n\\label{table:resultsFull}\n\\end{table} \n\\subsection{Ablations}\nWe also evaluated our model in the standard homogeneous setting where all tasks share a fixed motion graph.\nThe results are stated in Table \\ref{table:resultsFull}. We implemented our own version of \\textsc{paml} and \\textsc{MoPredNet}, as there is no public implementation in either approach. We received no further information when contacting the original authors. Both the published results and the results for our implementation of \\textsc{MoPredNet} are given in the table, as we were not able to replicate the results reported in the publication. For \\textsc{paml}, we only show the reported results as our reimplementation achieves results that match the reported results.\n\nOur approach is shown to be on par with the previously best approach, \\textsc{MoPredNet} while showing slight improvements for short-term frames after 80 and 160 ms. At the same time, the model capacity of our model is two magnitudes lower than of \\textsc{MoPredNet} and one magnitude lower than \\textsc{paml}. Comparing our results to \\textsc{TimeHetNet}{}, we see that convolutional graph layers give significant performance lifts. \n\nIn a further ablation, we analyzed the influence of the size of sampled subgraphs during meta-training on meta-testing. For this, we repeated our experimental setup but limited the maximum number of nodes in the subgraph from 5 to 35 for meta-training and -testing, respectively. 
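One plausible way to aggregate this ablation into the heat map of Figure~\ref{fig:heat} is sketched below; the per-action min-max normalization is our reading of the caption ("MSE averaged across the normalized results for each action") and the scores are random placeholders, not results from our experiments.
\begin{verbatim}
# Sketch (ours) of the ablation grid and one possible aggregation: each cell
# combines a maximum subgraph size in meta-training with one in meta-testing,
# and MSE scores are normalized per action before averaging.
import numpy as np

caps = list(range(5, 40, 5))                    # 5, 10, ..., 35 vertices
rng = np.random.default_rng(0)
mse = rng.random((len(caps), len(caps), 4))     # toy scores for the 4 test actions
lo = mse.min(axis=(0, 1), keepdims=True)
hi = mse.max(axis=(0, 1), keepdims=True)
heatmap = ((mse - lo) / (hi - lo)).mean(axis=-1)
print(heatmap.shape)                            # one value per (train cap, test cap)
\end{verbatim}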
The results are shown in Figure \\ref{fig:heat} and demonstrate that our approach is relatively robust to the size of the subgraphs in meta-training with a slight peak when training on tasks with up to 20 vertices, which means that the model can generalize to larger graphs during testing. It also illustrates how larger tasks correlate to a more difficult motion prediction as the chance to extract useful information from neighboring sensors increases. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \\section{Conclusion} \\label{chap:conclusion}\n\nIn this work, we proposed a new approach for few-shot human motion prediction, which generalizes over tasks with heterogeneous motion sensors arranged in a graph, outperforming all related baselines which are not equipped for varying sensor graphs. This is the first approach that allows for the prediction of novel human motion tasks independent of their number of sensors. Moreover, using this model, we can rival state-of-the-art approaches for the standard few-shot motion benchmark on tasks with homogeneous sensors while maintaining a significantly smaller model size which can be crucial for applications of human motion detection as these are often found in mobile and handheld devices. By publicising all our code including the baseline reimplementations as well as our benchmark pipline we hope to motivate future research in this area. \n \\bibliographystyle{splncs04}\n "} +{"id": "red-arxiv-8", "source_id": "red-arxiv_8_red-arxiv-8", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{\\@startsection {section}{1}{\\z@}{-3.5ex plus -1ex minus\n -.2ex}{2.3ex plus .2ex}{\\large\\bf}}\n\\def\\subsection{\\@startsection{subsection}{2}{\\z@}{-3.25ex plus -1ex\nminus -.2ex}{1.5ex plus .2ex}{\\normalsize\\bf}}\n\\makeatother\n\\makeatletter\n\\def\\arabic{section}.\\arabic{equation}{\\arabic{section}.\\arabic{equation}}\n\\newcommand{\\sect}[1]{\\setcounter{equation}{0}\\section{#1}}\n\\@addtoreset{equation}{section}\n\\renewcommand{\\arabic{section}.\\arabic{equation}}{\\thesection.\\arabic{equation}}\n\\makeatother\n\\renewcommand{\\baselinestretch}{1.15}\n\\textwidth 150mm\n\\textheight 210mm %\n\\topmargin -.05in\n\\oddsidemargin 5mm\n\\newcommand{\\begin{equation} \\begin{aligned}} \\newcommand{\\eea}{\\end{aligned} \\end{equation}}{\\begin{equation} \\begin{aligned}} \\newcommand{\\eea}{\\end{aligned} \\end{equation}}\n\\def\\be{\\begin{equation}} \\def\\ee{\\end{equation}} \n\\def\\nonumber{\\nonumber}\n\n\\newcommand{\\widetilde g}{\\widetilde g}\n\n\\setlength{\\evensidemargin}{0cm}\n\\setlength{\\oddsidemargin}{0cm}\n\\setlength{\\topmargin}{0.00cm}\n\\setlength{\\textwidth}{16cm}\n\\setlength{\\textheight}{22cm}\n\\setlength{\\headheight}{0cm}\n\\setlength{\\headsep}{0cm}\n\\setlength{\\voffset}{0cm}\n\\setlength{\\paperheight}{27cm}\n\\usepackage[colorlinks,linkcolor=black,citecolor=blue,urlcolor=blue,linktocpage,pagebackref]{hyperref}\n\\newcommand{\\hhref}[2][]{\\href{http://arxiv.org/abs/#2#1}{arXiv:#2}}\n\\frenchspacing\n\\allowdisplaybreaks\n\n\\newcommand{\\MS}[1]{{\\color{blue}[MS: #1]}}\n\\newcommand{\\LDP}[1]{{\\color{red}[LDP: #1]}}\n\\newcommand{\\FDC}[1]{{\\color{teal}[FDC: #1]}}\n\\setcounter{tocdepth}{2}\n\n\n\\tolerance=10000\n\\begin{document}\n\n\\thispagestyle{empty}\n\n\\begin{center}\n\n\t\\vspace*{-.6cm}\n\n\t\\begin{center}\n\n\t\t\\vspace*{1.1cm}\n\n\t\t{\\centering \\Large\\textbf{Free Energy on the Sphere for Non-Abelian Gauge 
Theories}}\n\n\t\\end{center}\n\n\t\\vspace{0.8cm}\n\t{\\bf Fabiana De Cesare$^{b,c}$, Lorenzo Di Pietro$^{a,b}$ and Marco Serone$^{b,c}$}\n\n\t\\vspace{1.cm}\n\t\n\t${}^a\\!\\!$\n\t{\\em Dipartimento di Fisica, Universit\\`a di Trieste, \\\\ Strada Costiera 11, I-34151 Trieste, Italy}\n\t\t\n\t\\vspace{.3cm}\n\n\t${}^b\\!\\!$\n\t{\\em INFN, Sezione di Trieste, Via Valerio 2, I-34127 Trieste, Italy}\n\n\t\\vspace{.3cm}\n\n\t${}^c\\!\\!$\n\t{\\em SISSA, Via Bonomea 265, I-34136 Trieste, Italy}\n\n\t\\vspace{.3cm}\n\n\n\\end{center}\n\n\\vspace{1cm}\n\n\\centerline{\\bf Abstract}\n\\vspace{2 mm}\n\\begin{quote}\n\nWe compute the $S^d$ partition function of the fixed point of non-abelian gauge theories in continuous $d$, using the $\\epsilon$-expansion around $d=4$. We illustrate in detail the technical aspects of the calculation, including all the factors arising from the gauge-fixing procedure, and the method to deal with the zero-modes of the ghosts. We obtain the result up to NLO, i.e. including two-loop vacuum diagrams. Depending on the sign of the one-loop beta function, there is a fixed point with real gauge coupling in $d>4$ or $d<4$. In the first case we extrapolate to $d=5$ to test a recently proposed construction of the UV fixed point of $5d$ $SU(2)$ Yang-Mills via a susy-breaking deformation of the $E_1$ SCFT. We find that the $F$ theorem allows the proposed RG flow. In the second case we extrapolate to $d=3$ to test whether QCD$_3$ with gauge group $SU(n_c)$ and $n_f$ fundamental matter fields flows to a CFT or to a symmetry-breaking phase. We find that within the regime with a real gauge coupling near $d=4$ the CFT phase is always favored. For lower values of $n_f$ we compare the average of $F$ between the two complex fixed points with its value at the symmetry-breaking phase to give an upper bound of the critical value $n_f^*$ below which the symmetry-breaking phase takes over. \n\n\\end{quote}\n\n\n\\newpage\n\n\\tableofcontents\n\n\\section{Introduction}\n\nThe intuitive idea that the number of degrees of freedom should decrease along a Renormalization Group (RG) flow can be made precise in Quantum Field Theory (QFT). This is done by assigning a quantity to Conformal Field Theories (CFT) --which typically are the endpoints of RG flows-- with the property of monotonicity along the flow, i.e. its value is always larger for the UV CFT than it is for the IR CFT. Such quantities were first found in even spacetime dimensions $d$ \\cite{Zamolodchikov:1986gt,Cardy:1988cwa,Komargodski:2011vj,Komargodski:2011xv} and identified with the $a$ coefficient in the Weyl anomaly\n\\be\n\\left\\langle T_{\\mu}^{\\mu}\\right\\rangle \\sim(-1)^{d / 2} a E_{d}+\\sum_{i} c_{i} I_{i}~,\n\\ee\nwhere $E_{d}$ is the Euler density term, and $c_{i}$ are the coefficients of other Weyl invariant curvature terms $I_i$. The inequality $a_{\\mathrm{UV}}>a_{\\mathrm{IR}}$ was proved in $d=2$ \\cite{Zamolodchikov:1986gt}, in which case it reduces to the celebrated $c$-theorem because $a=c/3$, and in $d=4$ \\cite{Komargodski:2011vj,Komargodski:2011xv}. Attempts to generalize the proof to $d=6$ \\cite{Elvang:2012st} so far have only succeeded in the supersymmetric case \\cite{Cordova:2015fha}. 
\n\nIn odd dimensions, even though there are no Weyl anomalies, a monotonic quantity can still be defined using the (renormalized) free energy on the sphere\n\\be\nF = -\\log Z_{S^d}~.\n\\ee\nThe inequality $F_{\\mathrm{UV}}>F_{\\mathrm{IR}}$, known as the $F$-theorem, was proposed and checked in \\cite{Jafferis:2011zi, Klebanov:2011gs} and then proved in $d=3$ using the relation to entanglement entropy across a spherical entangling surface \\cite{Casini:2011kv, Casini:2012ei}, which can also be used to prove monotonicity of the $a$ coefficient in $d=2,4$ \\cite{Casini:2006es, Casini:2017vbe}. For other odd dimensions it is conjectured that the decreasing quantity becomes $(-1)^{\\frac{d+1}{2}}F$ \\cite{Klebanov:2011gs}. This has not been proved yet but is motivated by several examples.\n\nIt is sometimes possible to continue RG flows to non-integer dimensions, at least formally. When a flow to an interacting fixed point can be continued to the vicinity of its upper or lower critical dimension, it becomes short and controllable in perturbation theory. This strategy, known as $\\epsilon$-expansion \\cite{Wilson:1971dc, Wilson:1973jj}, can lead to a useful approximation of strongly coupled fixed points. Motivated by this method, ref. \\cite{Giombi:2014xxa} proposed to unify all the previously mentioned inequalities in a single relation valid in continuous dimensions. The authors defined \n\\be\n\\widetilde{F}=-\\sin\\left(\\frac{\\pi d}{2}\\right)F,\n\\label{eq:FtildeDef}\n\\ee\nwhich in odd $d$ exactly reproduces the $(-1)^{(d+1)/2}F$ term, while in even $d$ provides a smooth limit proportional to the $a$ anomaly: the factor $\\sin(\\frac{\\pi d}{2})$ cancels the pole in the free energy leading to the finite limit $\\widetilde{F}=\\pi a/2$. Therefore, the inequality\n\\be\n\\widetilde{F}_\\text{UV}>\\widetilde{F}_\\text{IR}\n\\label{eq:Ftheo}\n\\ee\nautomatically encodes all the previous relations, and extends them to non-integer values of $d$. \n\nIn this paper we compute the quantity $\\widetilde{F}$ for the fixed point in non-abelian gauge theories, in an expansion around $d=4$. The existence of such fixed points can be inferred from the leading terms in the $\\beta$ function for the gauge coupling, which in $d=4+2\\epsilon$ read\n\\begin{equation}\n\\beta_g = \\epsilon g + \\beta_0 g^3 + \\mathcal{O}(g^5)~.\n\\end{equation}\nFor a small number of matter fields the one-loop coefficient $\\beta_0$ is negative and leads to a one-loop fixed point $g^{* 2}_{\\text{1-loop}}>0$ for $d>4$, while for $d<3$ a minimal number of matter fields is required to have $\\beta_0>0$, so that $g^{* 2}_{\\text{1-loop}}>0$. The quantity $\\widetilde{F}$ was computed in \\cite{Giombi:2015haa} for $d$-dimensional QED with $n_f$ four-component fermionic matter fields, for which $\\beta_0$ is always positive. It was then extrapolated to $d=3$ to study the existence of an interacting IR CFT for QED in 3 spacetime dimensions, by comparing with the quantity $F$ for the spontaneously broken phase of $2 n_f^2 +1$ massless Goldstone bosons. \n\nThe calculation in non-abelian gauge theories presents several new challenges compared to the abelian case. Firstly, the gauge fixing requires a more careful analysis, because it becomes unavoidable to include the interaction with the ghost fields. On the sphere massless scalar fields like the ghosts have zero modes. Due to the fermionic nature of ghosts, this naively leads to a zero in the partition function, which manifests as an IR divergence in $\\widetilde{F}$. 
This divergence needs to be cured by an appropriate regulator (or alternatively by appropriately modifying the gauge-fixing procedure, as we describe in an appendix). Note that, in order to obtain $\\widetilde{F}$, it is crucial to carefully keep track of the normalization of the path integral on $S^d$ when implementing the gauge-fixing through the Faddeev-Popov procedure \\cite{Faddeev:1967fc}. Secondly, the derivative self-interaction of the gluon leads to diagrams with two derivatives acting on the propagator, and it is important to include also the contact-term contributions in order to evaluate correctly the integrals over the positions of the vertices. Thirdly, unlike QED the renormalization in the gauge sector is not simply encoded in the definition of a renormalized gauge coupling, instead one needs to consider also wave-function counterterms for the gluons and the ghosts. We perform the calculation, taking care of all these issues, up to the next-to-leading (NLO) order, i.e. including up to two-loop vacuum diagrams. The result is in eq.~\\eqref{eq:fFinal}. Note that, while we compute the two-loop diagrams in generic $\\xi$-gauge, which allows us to compare with heat-kernel results for generic background \\cite{Jack:1982sn}, we keep track of the normalization of the path integral only in the special case of the Landau gauge, i.e. $\\xi = 0$.\n\nWe then apply this result to the fixed points of $SU(n_c)$ non-abelian gauge theories in $d=3$ and in $d=5$. In $d=3$, just like in the QED case mentioned above, the theory is known to flow to a CFT in the IR for a sufficiently large number of matter flavors $n_f$ \\cite{Appelquist:1989tc}, and it is conjectured to change its behavior for $n_f$ smaller than an unknown critical value $n_f^*$, flowing instead to a phase with spontaneous breaking of the global symmetry \\cite{Vafa:1983tf,Vafa:1984xh,Appelquist:1989tc,Komargodski:2017keh}. We adopt the same logic as in \\cite{Giombi:2015haa}, and compare $F$ of the fixed point to that of the putative Goldstone bosons phase. We find that when $\\beta_0>0$, so that $g^{* 2}_{\\text{1-loop}}>0$, the conformal phase is always favored compared to the symmetry-breaking phase. For $\\beta_0<0$ the fixed point is complex in the $\\epsilon$-expansion, but a unitary fixed point in $d=3$ can still exist.\\footnote{The opposite situation can also occur, a fixed point for $\\epsilon\\ll 1$ which disappears in physical integer dimensions.} We propose a more speculative approach to estimate $F$ of the $3d$ CFT in this case, by taking an average value of $\\widetilde F$ among the two complex fixed points. With this method we find that the Goldstone boson phase becomes favored for small $n_f$, allowing us to put an upper bound on $n_f^*$. The values found for $2\\leq n_c \\leq 5$ are reported in eq.~\\eqref{eq:nfbounds}. The result for $n_c=2$ favorably compares with previous bounds found using again the $F$-theorem combined with supersymmetry \\cite{Sharon:2018apk}, or lattice methods \\cite{Karthik:2018nzf}. We also give an estimate for the upper bound on $x^*$ in the Veneziano limit in eq.~\\eqref{eq:Veneziano}, where $x=n_f/n_c$.\n\nIn $d=5$ we use our calculation to investigate the existence of interacting CFTs that UV complete $5d$ non-abelian gauge theories. If such CFTs exist they would be an example of a non-supersymmetric interacting CFT in $d>4$. 
An interesting construction in the case of $SU(2)$ Yang-Mills theory was recently proposed in \\cite{BenettiGenolini:2019zth}, and further refined in \\cite{Bertolini:2021cew}, using the $E_1$ superconformal field theory that UV completes $SU(2)$ Super Yang-Mills. The putative non-supersymmetric CFT is obtained as the IR endpoint of the RG flow triggered by a certain non-supersymmetric deformation of $E_1$, and by construction it is endowed with a relevant deformation that flows to ordinary $SU(2)$ Yang-Mills theory. Using our extrapolation to $5d$ we can compare the quantity $F$ of the non-supersymmetric CFT with that of the $E_1$ SCFT, known from supersymmetric localization \\cite{Chang:2017cdx}, and test if the RG flow is allowed. \nWe can also easily repeat this check in the case with fundamental flavors $n_f$ and compare with the $F$ quantity of the $E_{n_f + 1}$ SCFT that UV completes the supersymmetric gauge theory with flavors. In all cases in which we have evidence for a fixed point in $d=5$, namely $n_f \\leq 4$ \\cite{DeCesare:2021pfb}, we obtain that the $F$-theorem allows the proposed RG flow.\n\nThe rest of the paper is organized as follows: in section 2 we explain some generalities about the calculation of the sphere partition function, we perform the gauge-fixing and compute the one-loop determinants for non-abelian gauge theories. In section 3 we derive the Feynman rules on the sphere, including the gauge field propagator in an arbitrary $\\xi$-gauge. In section 4 we compute the two-loop vacuum-vacuum diagrams and obtain our main result. In section 5 we apply the result to the $d=3$ and $d=5$ models described above. In section 6 we draw our conclusions and outline some possible future directions. Most of the technical points of the calculation are relegated to the first three appendices. In appendix D \n we show a sanity check of our results, by comparing in detail the UV divergences obtained for pure Yang-Mills theory in ref.~\\cite{Jack:1982sn} in the Feynman gauge $\\xi=1$ with our results.\nIn appendix E we explain a possible alternative gauge-fixing procedure (used already in \\cite{Pestun:2007rz}) where ghost zero modes are treated more carefully by introducing ghosts for ghosts,\nwhich we also use to partially check the results in the main body. \n\nFinally, a comment on notation: in this paper $n_f$ always refers to the number of $4d$ Dirac fermions. Given the way we analytically continue fermions, $n_f$ $4d$ Dirac fermions\ngive rise to $2n_f$ Dirac fermions in $3d$ and $n_f$ Dirac fermions in $5d$.\n\n\n\\section{Free energy of gauge theories on the sphere: leading order}\n\nLet us consider a non-abelian gauge theory with $n_f$ massless Dirac fermions in the fundamental representation.\nWe want to compute the sphere free energy in $d=4+2\\epsilon$, defined as\n\\be\nF = -\\log Z_{S^d}~, \\qquad \nZ_{S^d} = \\frac{1}{\\mathrm{vol}(\\mathcal{G})} \\int \\mathcal{D}A\\mathcal{D}\\psi\\mathcal{D}\\bar{\\psi} \\, \\exp\\left(-S[A,\\psi,\\bar{\\psi}, h]\\right)~.\n\\label{eq:a}\n\\ee\nHere $h$ denotes the round metric $h_{\\mu\\nu}$ on $S^d$ with radius $R$ and coordinate $x$, while $\\mathrm{vol}(\\mathcal{G})$ is the volume of the space of all gauge transformations, which in our choice of normalization does not depend on the gauge coupling $g$. 
We can split the action on the sphere in\n\\begin{align}\n\\begin{split}\nS=S_\\text{YM}+S_\\text{Ferm}+S_\\text{curv}\\ ,\n\\label{eq:b}\n\\end{split}\n\\end{align}\nwith\n\\begin{align}\nS_\\text{YM} & =\\int d^{d} x \\sqrt{h}\\left(\\frac{1}{4 g_{0}^{2}} \\mathrm{Tr}[F_{\\mu\\nu}(x)F^{\\mu\\nu}(x)]\\right)\\ \\label{eq:c1} \\,, \\\\\nS_\\text{Ferm} &=\\int d^{d} x \\sqrt{h}\\left(-\\sum_{i=1}^{n_f} \\bar{\\psi}_{i} \\gamma^{\\mu}\\left(\\nabla_{\\mu}+i A_{\\mu}\\right) \\psi^{i}\\right)\\ ,\\label{eq:c2} \\\\\nS_\\text{curv} & =\\int d^{d} x \\sqrt{h}\\left(b_{0} E +c_{0} \\mathcal{R}^{2} /(d-1)^{2}\\right),\n\\label{eq:c3}\n\\end{align}\nwhere $h={\\rm det}\\, h_{\\mu \\nu}$,\\footnote{We use the same symbol $h$ for the metric determinant ${\\rm det}\\, h_{\\mu \\nu}$ and for the metric tensor $h_{\\mu\\nu}$ whenever indices are omitted. The difference should be clear from the context.} $g_0$ is the bare gauge coupling constant, $\\psi^{i}$ are $n_f$ four-component Dirac fermions and $\\nabla_{\\mu}$ is the curved space covariant derivative which includes the spin connection term when acting on fermions. As the action should contain all operators that are marginal in $d=4$, we have added the curvature terms together with their bare coupling parameters $b_{0}$ and $c_{0}$.\\footnote{In a generic Euclidean manifold we should also include a term with the square of the Weyl tensor, omitted here as it vanishes on the sphere.} For future purposes, we recall the expression for the Ricci scalar $\\mathcal{R}$ and the Euler density $E$ on $S^d$:\n\\begin{equation}\n\\begin{aligned}\n&\\mathcal{R}=\\frac{d(d-1)}{R^2}~,~~E=\\mathcal{R}_{\\mu \\nu \\lambda \\rho} \\mathcal{R}^{\\mu \\nu \\lambda \\rho}-4 \\mathcal{R}_{\\mu \\nu} \\mathcal{R}^{\\mu \\nu}+\\mathcal{R}^{2}=\\frac{d(d-1)(d-2)(d-3)}{R^4}~.\n\\label{eq:curvScala}\n\\end{aligned}\n\\end{equation}\n\n\\subsection{One-loop determinants}\n\\label{sec:oneloop}\n\nAt leading order in a loopwise expansion the free energy is determined by one loop determinants. \nAs a consequence of the splitting in eq.~\\eqref{eq:b}, we can divide the leading term of the sphere free \nenergy $F_\\text{Free}$ in three parts:\n\\begin{equation}\nF_\\text{Free}=F_\\text{free-YM}+F_\\text{free-ferm}+F_\\text{curv}\\ ,\n\\end{equation}\nwith\n\\begin{align}\nF_\\text{free-YM} & =-\\log\\bigg(\\frac{1}{\\mathrm{vol}(\\mathcal{G})} \\int \\mathcal{D}Ae^{-S_{\\text{free-YM}}[A, h]}\\bigg)\n\\label{eq:d1}\\,, \\\\\nF_\\text{free-ferm} & =-\\log \\Big(\\int\\mathcal{D}\\psi\\mathcal{D}\\bar{\\psi} \\, e^{-S_{\\text{free-ferm}}[\\psi, h]}\\Big)\\,,\n \\label{eq:d2} \\\\\nF_\\text{curv} & =\\Omega_d R^{d-4}(d(d-1)(d-2)(d-3))b_0+ d^2 c_0),\n\\label{eq:d3}\n\\end{align}\nwhere $S_\\text{free-YM}$ is the quadratic part of the Yang-Mills action, $S_{\\text{free-ferm}}$ the free fermion action and \n$\\Omega_d= 2\\pi^{\\frac{d+1}{2}}/\\Gamma(\\frac{d+1}{2})$ is the volume of the $d$-dimensional sphere with unit radius. \n\nThe expression for $F_{\\text{free-ferm}}$ was found in ref.~\\cite{Giombi:2014xxa}. 
The result for a single four-component Dirac spinor is \n\\be\nF_\\text{free-ferm}(d)=-\\frac{4}{\\sin(\\frac{\\pi d }{2})\\Gamma(1+d)}\\int_0^1 du\\ \\cos\\left(\\frac{\\pi u}{2}\\right)\\Gamma\\left(\\frac{1+d+u}{2}\\right) \\Gamma\\left(\\frac{1+d-u}{2}\\right)\\ .\n\\label{eq:r}\n\\ee\n\nLet us now focus on the computation of $F_\\text{free-YM}$.\nThe gauge field $A^\\mu$ on the sphere can be written as the sum of a longitudinal part $A^\\mu_{(0)}$ and a transverse part $A^\\mu_{(1)}$, which can be separately decomposed in orthonormal eigenvectors of the sphere Laplacian $-\\nabla^2$:\n\\begin{equation}\n\\begin{split}\nA^\\mu&=A_{(0)}^\\mu+A_{(1)}^\\mu~,\\quad\\text{such that} \\quad \\nabla_\\mu A_{(1)}^\\mu=0~,\\\\\nA_{(0)}^\\mu&=\\sum_{l>0}a_{(0)}^\\ell A_{(0)}^{\\mu\\ \\ell}~, \\quad A_{(1)}^\\mu=\\sum_{l>0}a_{(1)}^\\ell A_{(1)}^{\\mu\\ \\ell}~,\n\\label{eq:h}\n\\end{split}\n\\end{equation}\nwith corresponding eigenvalues $\\lambda_{\\ell}^{(1)}$, $\\lambda_{\\ell}^{(0)}$ and degeneracies $g_{\\ell}^{(1)}$, $g_{\\ell}^{(0)}$ given by \\cite{Rubin:1984tc}\n\\begin{equation} \\begin{aligned}} \\newcommand{\\eea}{\\end{aligned} \\end{equation}\n\\lambda_{\\ell}^{(1)} & =\\frac{(\\ell(\\ell+d-1)-1)}{R^2}~, \\quad \\quad \\;\\;\\; g_{\\ell}^{(1)} =\\frac{\\ell(\\ell+d-1)(2 \\ell+d-1) \\Gamma(\\ell+d-2)}{\\Gamma(\\ell+2) \\Gamma(d-1)}~,~~ \\ell > 0~, \\\\\n\\lambda_{\\ell}^{(0)} & = \\frac{\\ell(\\ell+d-1)-(d-1)}{R^2}~, \\quad g_{\\ell}^{(0)} =\\frac{(2 \\ell+d-1) \\Gamma(\\ell+d-1)}{\\Gamma(\\ell+1) \\Gamma(d)}~, \\,~~\\quad \\qquad \\qquad \\ell > 0~.\n\\label{eq:f}\n\\eea\nNote that the eigenfunctions of the longitudinal part can be rewritten in terms of the covariant derivative of the spherical harmonics $Y_\\ell(x)$\n\\begin{equation}\\label{eq:p2}\nA_{(0)}^{\\mu\\ \\ell}=\\frac{1}{\\sqrt{\\lambda_{\\ell}^{(S)}}}\\nabla^\\mu Y_\\ell(x)~,~~\\text{for } \\ell\\ge1~.\n\\end{equation}\nWe take the spherical harmonics to be normalized as\n\\be\n\\int d^dx\\sqrt{h}\\ Y_\\ell(x)Y_{\\ell'}(x)=\\delta_{\\ell\\ell'}~.\n\\label{eq:u2}\n\\ee\nIn order to make the basis $A_{(0)}^{\\mu\\ \\ell}$ orthonormal, we have fixed the normalization factor in terms of the eigenvalue of the laplacian operator associated to $Y_\\ell(x)$\n\\begin{equation}\n\\begin{split}\n\\lambda_{\\ell}^{(S)}=\\frac{\\ell(\\ell+d-1)}{R^2}~,\n\\label{eq:f2}\n\\end{split}\n\\end{equation}\nwhich has degeneracy $g_\\ell^{(0)}$. 
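As a quick numerical cross-check of the fermionic contribution, the integral representation for $F_\text{free-ferm}$ given above can be evaluated directly; the snippet below is our own illustration with standard SciPy routines and is not part of the calculation in this paper.
\begin{verbatim}
# Numerical evaluation (ours) of the free four-component Dirac fermion sphere
# free energy from its integral representation, here directly at d = 3.
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma

def F_free_ferm(d):
    integrand = lambda u: (np.cos(np.pi * u / 2)
                           * gamma((1 + d + u) / 2) * gamma((1 + d - u) / 2))
    val, _ = quad(integrand, 0.0, 1.0)
    return -4.0 / (np.sin(np.pi * d / 2) * gamma(1 + d)) * val

# approximately 0.438, i.e. twice the familiar two-component Dirac value
# log(2)/4 + 3 zeta(3)/(8 pi^2) of the 3d free-field literature
print(F_free_ferm(3.0))
\end{verbatim}
Returning to the gauge sector, the transverse and longitudinal spectra listed above are the ingredients entering $F_\text{free-YM}$.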
\nNote a crucial difference between the spectrum for a scalar and for the longitudinal modes of a vector: the former includes a constant mode with eigenvalue $\\lambda_{0}^{(S)}=0$ and degeneracy $g_0^{(0)}=1$, while for the latter the modes are restricted to $\\ell>0$ and as a result the constant is excluded.\n\nIn dimensional regularization the following identities are valid, which will be useful later in the computation:\n\\begin{equation}\n\\sum_{\\ell=1}^{\\infty} g_{\\ell}^{(1)}=1 \\quad \\text{and} \\quad \\sum_{\\ell=1}^{\\infty} g_{\\ell}^{(0)}=-1~.\n\\label{eq:o}\n\\end{equation}\nWith this decomposition in longitudinal and transverse mode the path integral measure can be rewritten as\n\\be\n\\int\\mathcal{D}A=\\int\\prod_{\\ell=1}^\\infty d a_{(0)}^\\ell\\ \\int\\prod_{\\ell=1}^\\infty d a_{(1)}^\\ell\\ \\ .\n\\ee\n\n\\subsection{Computation in Landau gauge}\n\\label{sec:Landau}\n\nWe want to compute \n\\be\nF_\\text{free-YM}=-\\log \\bigg(\\frac{1}{\\mathrm{vol}(\\mathcal{G})} \\int \\mathcal{D}A\\,e^{-S_{\\text{free-YM}}[A, h]} \\bigg)\\,,\n\\ee\nwith\n\\begin{equation}\n S_\\text{free-YM}= \\int d^{d} x \\sqrt{h}\\,\\frac{1}{2 g_{0}^{2}}\\mathrm{Tr}\\left[A_{\\nu}(-\\delta^\\nu_\\mu\\nabla^2+R^\\nu_\\mu+\\nabla^\\nu\\nabla_\\mu) A^\\mu\\right]\n\\end{equation}\nand $R^\\nu_\\mu=\\frac{d-1}{R^2}\\delta^\\nu_\\mu$ on $S^d$.\nIn order to perform the explicit computation it is convenient to add a gauge-fixing term to the action. We work in Landau gauge and set to zero the longitudinal component of the gauge field. In order to do that we insert in the path integral of eq.~\\eqref{eq:a} the following functional identity, valid for any fixed $A_\\mu(x)$:\n\\begin{equation}\n1 = \\int_\\mathcal{G'} \\mathcal{D}\\mu_g(U) \\delta(\\nabla^\\mu A_\\mu^{U} )\\left | \\mathrm{det}\\frac{\\delta \\nabla^\\mu A_\\mu^{U}}{\\delta \\epsilon}\\right | ~,\n\\label{eq:e}\n\\end{equation}\nwhere $A^U_\\mu(x)$ is the gauge-transformed field under $U(x)$\n\\begin{equation}\nA_\\mu(x) \\to A^U_\\mu(x) = U(x)(\\nabla_\\mu + i A_\\mu(x) )U^{-1}(x) \\equiv U(x)D^A_\\mu U^{-1}(x)~.\n\\end{equation}\nTaking the components in the Lie Algebra, denoted with indices $a,b,c,\\dots$, and also writing $U=\\exp(i \\epsilon^a T^a)$ in terms of the parameter $\\epsilon^a$ and the generators $T^a$, we get the infinitesimal transformation\n\\begin{equation}\n\\delta A^a_\\mu(x) = (D^A_\\mu\\epsilon)^a(x) = \\nabla_\\mu \\epsilon^a(x) + i f^{abc}A^b_\\mu(x)\\epsilon^c(x)~.\n\\end{equation}\nThe integration in eq.~\\eqref{eq:e} is performed over the functional Haar measure $\\mu_g$ and is restricted to the set of gauge transformations $\\mathcal{G}'$ that act non-trivially on $A_\\mu(x)$, i.e. those that give a non-zero functional determinant. In the functional derivative the variation $\\delta \\epsilon$ is an infinitesimal variation away from $U$ (the integration variable) and tangential to $\\mathcal{G}'$, hence $\\delta \\epsilon$ is any fluctuation not annihilated by the covariant derivative with connection $A_\\mu^U$. So we have\n\\begin{equation}\n\\left|\\mathrm{det}\\frac{\\delta \\nabla^\\mu A_\\mu^U}{\\delta \\epsilon}\\right | = \\mathrm{det}' \\left(-\\nabla^\\mu D^{A^U}_\\mu\\right)~,\n\\end{equation}\nwhere the prime denotes that we need to exclude the zero eigenvalue and the minus sign is taken to ensure positivity of the determinant, at least perturbatively. 
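Before proceeding with the gauge fixing, the two regularized mode sums quoted above can be made plausible numerically: for $d<0$ both series converge and can be summed term by term, and dimensional regularization extends the result to general $d$ by analytic continuation. The snippet below is our own check, not part of the paper's derivation.
\begin{verbatim}
# Direct numerical check (ours) of sum_{l>=1} g_l^(1) = 1 and
# sum_{l>=1} g_l^(0) = -1 at a value d < 0 where both series converge.
from mpmath import mp, mpf, gamma

mp.dps = 30
d = mpf(-3) / 2

def g0(l):   # scalar / longitudinal degeneracy
    return (2 * l + d - 1) * gamma(l + d - 1) / (gamma(l + 1) * gamma(d))

def g1(l):   # transverse vector degeneracy
    return (l * (l + d - 1) * (2 * l + d - 1) * gamma(l + d - 2)
            / (gamma(l + 2) * gamma(d - 1)))

L = 5000     # the tails fall off like l**(d-1), so a plain partial sum suffices
print(sum(g0(l) for l in range(1, L)))   # ~ -1
print(sum(g1(l) for l in range(1, L)))   # ~ +1
\end{verbatim}
With these identities in hand, we return to the gauge-fixing determinant.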
At this point in order to proceed we restrict ourselves to the case of Landau gauge, and use that in Landau gauge the operator is self-adjoint as $\\nabla^\\mu$ and $D^{A^U}_\\mu$ commute. Therefore, we can implement the prime by excluding constant modes instead of covariantly constant ones. We will always assume this meaning of the prime from now on, as this will lead to a great simplification in the following manipulations.\n\nInserting the identity in the path integral and exchanging the order of the integrals we obtain\n\\begin{equation}\nF =-\\log \\bigg( \\frac{1}{\\mathrm{vol}(\\mathcal{G})} \\int_\\mathcal{G'} \\!\\mathcal{D}\\mu_g(U) \\! \\int\\! \\mathcal{D}A \\, \\exp\\left(-S[A,\\psi,\\bar{\\psi}, h]\\right) \\,\\delta(\\nabla^\\mu A_\\mu^{U}) \\mathrm{det}' \\left(-\\nabla^\\mu D^{A^U}_\\mu\\right)\\! \\bigg)~.\n\\end{equation}\nUsing gauge invariance of the integration measure and of the action the integral in $A$ can be rewritten in terms of the variable $A^U$, renamed $A$. As a result the integral over $\\mu_g$ yields just the volume of $\\mathcal{G}'$ and we get\n\\begin{equation}\nF =-\\log\\Bigg( \\frac{\\mathrm{vol}(\\mathcal{G'})}{\\mathrm{vol}(\\mathcal{G})} \\int \\mathcal{D}A \\, \\exp\\left(-S[A,\\psi,\\bar{\\psi}, h]\\right) \\,\\delta(-\\nabla^\\mu A_\\mu) \\mathrm{det}' \\left(-\\nabla^\\mu D^A_\\mu\\right) \\Bigg)~.\n\\end{equation}\nThe ratio of the two infinite-dimensional volumes gives the volume of the constant gauge transformations, i.e. the volume of the group $G$, multiplied by an\n additional factor that arises by requiring an orthonormal mode decomposition in the path integral.\\footnote{The normalization of the path integral is chosen following ref.~\\cite{Pestun:2007rz}. There is however a difference in the computation of the volume of the gauge group as in our notation the coupling does not appear in the volume expression. }\n In order to explain this factor, consider separating a generic gauge transformation $f:S^d\\rightarrow G$ in a constant and a non-constant part $f(x)=f_0+f'(x)$. This can be done via the decomposition in spherical harmonics: $f(x)=\\sum_{\\ell=0}^\\infty F_\\ell Y_\\ell(x)$. 
In terms of this decomposition the measure of the path integral is \n\\be\\int\\mathcal{D}f=\\int\\prod_{\\ell=0}^\\infty d F_\\ell~.\n\\label{eq:g}\n\\ee\nBecause of the normalization in \\eqref{eq:u2} we have $Y_0=1/\\sqrt{ \\mathrm{vol}(S^d)}$, which implies ${f}_0=F_0/\\sqrt{ \\mathrm{vol}(S^d)}$ and\n\\begin{equation}\n\\mathrm{vol}(\\mathcal{G}) =\\int dF_0 \\,\\mathrm{vol}(\\mathcal{G'})= \\mathrm{vol}(S^d)^{\\frac{\\text{dim}(G)}{2}}\\,\\mathrm{vol}(G) \\,\\mathrm{vol}(\\mathcal{G'})~.\n\\end{equation}\nThis leads to\n\\begin{equation}\nF =-\\log \\Bigg( \\frac{ \\mathrm{vol}(S^d)^{\\frac{-\\text{dim}(G)}{2}}}{ \\mathrm{vol}(G)}\n \\int \\mathcal{D}A \\, \\exp\\left(-S[A,\\psi,\\bar{\\psi}, h]\\right) \\,\\delta(\\nabla^\\mu A_\\mu ) \\mathrm{det}' \\left(-\\nabla^\\mu D^A_\\mu\\right) \\Bigg)~.\n\\label{eq:a6}\n\\end{equation}\nWe then introduce non-constant $c'$ and $\\bar{c}'$ ghost modes to rewrite the $\\text{det}'$ as\n\\begin{equation}\n\\mathrm{det}' \\left(-\\nabla^\\mu D^A_\\mu\\right)=\\int \\mathcal{D}c'\\,\\mathcal{D}\\bar{c}' \\exp{\\left(-\\int d^d x \\sqrt{h(x)} \\,\\mathrm{Tr}[\\bar{c}'(x) \\nabla^\\mu D^A_\\mu c'(x)] \\right)} ~.\n\\label{eq:Sghost}\n\\end{equation} \nThe final step is to use the decomposition \\eqref{eq:h} to rewrite the $\\delta$-functional in eq.~\\eqref{eq:a6} in terms of the coefficients of the decomposition\n\\begin{equation}\n\\delta(\\nabla^\\mu A_\\mu )=\\delta\\left(\\sum_{\\ell=1}^\\infty\\frac{a^\\ell_{(0)}}{\\sqrt{\\lambda_\\ell^{(S)}}}\\nabla^2 Y_\\ell(x) \\right)=\\prod_{\\ell=1}^\\infty{\\left(\\frac{\\ell(\\ell+d-1)}{R^2}\\right)^{-\\frac{g_\\ell^{(0)}}{2}\\mathrm{dim}(G)}}\\delta\\left(a^\\ell_{(0)}\\right) \\ .\n\\label{eq:nablaExp}\n\\end{equation}\nThis sets to zero the longitudinal modes and provides a crucial factor in the path integral. \nPlugging eq.~\\eqref{eq:nablaExp} in eq.~\\eqref{eq:a6} and focusing on the Yang-Mills leading contribution gives\n\\begin{equation}\n\\begin{split}\nF_{\\text{free-YM}} &= -\\log \\Bigg(\\frac{1}{\\mathrm{vol}(G)\\sqrt{ \\mathrm{vol}(S^d)^{\\text{dim}(G)}}} \\prod_{\\ell=1}^\\infty{\\left(\\frac{\\ell(\\ell+d-1)}{R^2}\\right)^{-\\frac{g_\\ell^{(0)}}{2}\\mathrm{dim}(G)}}\\\\ &\\int \\mathcal{D}A_{(1)}\\, \\mathcal{D}c'\\,\\mathcal{D}\\bar{c}' \\, \\exp\\left(-S_{\\text{YM-Free}}[A_{(1)}, h]-\\int d^d x \\sqrt{h(x)} \\,\\mathrm{Tr}[\\bar{c}'(x) \\nabla^2 c'(x)]\\right)\\Bigg) ~.\n\\end{split}\n\\label{eq:freeym}\n\\end{equation}\n\nWe are finally ready to compute the integral. We start from the integration over $A_{(1)}$. 
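\nEach orthonormal mode enters the action quadratically, so only two elementary integrals are needed. Schematically, for a single real transverse mode $a$ and a single Grassmann pair $(C,\\bar{C})$ of non-constant ghost modes (with the normalization $\\int dC\\,d\\bar{C}\\,e^{\\bar{C}C}=1$ recalled below),\n\\be\n\\int da\\ e^{-\\frac{\\lambda}{2g_0^2}a^2}=\\left(\\frac{2\\pi g_0^2}{\\lambda}\\right)^{1/2}\\,,\\qquad \\int dC\\,d\\bar{C}\\ e^{\\lambda\\,\\bar{C}C}=\\lambda\\,,\n\\ee\nwhere $\\lambda=\\frac{(\\ell+1)(\\ell+d-2)}{R^2}$ is the eigenvalue of $-\\nabla^2+\\frac{d-1}{R^2}$ on transverse vector harmonics and $\\lambda=\\frac{\\ell(\\ell+d-1)}{R^2}$ that of $-\\nabla^2$ on scalar harmonics; raising these factors to powers given by the degeneracies, times $\\mathrm{dim}(G)$, produces the two products below.\n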
Using the decomposition in eqs.(\\ref{eq:h}-\\ref{eq:f}) and the normalization in eq.~\\eqref{eq:g} we get\n\\begin{equation}\n\\begin{split}\n\\int \\mathcal{D}A_{(1)} \\, &\\exp\\left({-\\int d^{d} x \\sqrt{h}\\frac{1}{2 g_{0}^{2}}\\left(A^a_{(1)\\mu}(-\\nabla^2+(d-1)) A_{(1)a}^\\mu\\right)}\\right)\\\\&=\\prod_{\\ell=1}^\\infty\\left(\\frac{2\\pi g_0^2R^2}{(\\ell+1)(\\ell+d-2)}\\right)^{\\frac{g_\\ell^{(1)}}{2}\\mathrm{dim}(G)}.\n\\label{eq:v}\n\\end{split}\n\\end{equation}\nFor the computation of the ghost path integral we again decompose in spherical harmonics: \n\\be c'(x)=\\sum_{\\ell=1}^\\infty C_\\ell Y_\\ell(x), \\quad\\int\\mathcal{D}c'=\\int\\prod_{\\ell=1}^{\\infty}dC_\\ell\\,.\\ee As we are dealing with Grassmann variables, we have \\be\\int\\mathcal{D}{C_\\ell}\\mathcal{D}\\bar{C}_\\ell\\exp\\left(\\bar{C}_\\ell C_\\ell\\right) = 1~,\\ee\nimplying\n\\begin{equation}\n \\int\\mathcal{D}c'\\,\\mathcal{D}\\bar{c}' \\, \\exp\\left(-\\int d^d x \\sqrt{h(x)} \\,\\mathrm{Tr}[\\bar{c}'(x) \\nabla^2 c'(x)]\\right)=\\prod_{\\ell=1}^\\infty\\left(\\frac{\\ell(\\ell+d-1)}{R^2}\\right)^{{g_\\ell^{(0)}}\\mathrm{dim}(G)}\\,.\n\\end{equation}\nReplacing in eq.~\\eqref{eq:freeym}, we get\n\\begin{align}\n\\begin{split}\nF_{\\text{free-YM}} &=\\log\\mathrm{vol}(G)+ \\frac{\\text{dim}(G)}{2}\\left(\\log \\mathrm{vol}(S^d)+ \\sum_{\\ell=1}^{\\infty} g_{\\ell}^{(1)} \\log \\left(\\frac{(\\ell+1)(\\ell+d-2)}{2\\pi g_0^{2}R^2}\\right)\\right.\\\\&\\left.-\\sum_{\\ell=1}^{\\infty} g_{\\ell}^{(0)} \\log \\left({\\frac{\\ell(\\ell+d-1)}{R^2}}\\right) \\right)~.\\label{eq:z7}\n\\end{split}\n\\end{align}\nIn order to find an explicit expression for these series one can follow \\cite{Giombi:2015haa}, who performed the same computation in the abelian case. Their procedure is based on the rewriting of the logarithms appearing in eq.~\\eqref{eq:z7} with the identities\n\\be\n\\log (y)=\\int_{0}^{\\infty} \\frac{d t}{t}\\left(e^{-t}-e^{-y t}\\right)\\,, \\quad \\frac{1}{t}=\\frac{1}{1-e^{-t}} \\int_{0}^{1} d u \\, e^{-u t} \\ .\n\\label{eq:u}\n\\ee\nThen, using gamma function identities, eq.~\\eqref{eq:o}, and performing the $t$-integrals, one can find an analytical expression for $F_{\\text {free-YM}}$. The only subtle point regards the ghost determinant. It is necessary to add and remove the zero mode, regulating with a mass parameter $\\delta$ which is set to zero in the end. 
This provides\n\\be -\\sum_{\\ell=1}^{\\infty} g_{\\ell}^{(0)} \\log \\left({\\ell(\\ell+d-1)}\\right)=\\lim _{\\delta \\rightarrow 0}\\left[-\\sum_{\\ell=0}^{\\infty} g_{\\ell}^{(0)} \\log \\left({(\\ell+\\delta)(\\ell+d-1)}\\right)+ \\log \\left(\\delta(d-1)\\right)\\right]\\,.\\ee\nFor the sum over $\\ell$ we use again eq.~\\eqref{eq:u}, while for the $ \\log \\left(\\delta(d-1)\\right)$ we use \\cite{Giombi:2015haa} \n\\begin{equation}\n\\log(\\delta)=-\\int_0^1\\frac{du}{u+\\delta}+\\log(1+\\delta)\\,.\n\\end{equation} \nPutting everything together we find a smooth limit $\\delta\\rightarrow0$, which reads\n\\be\nF_{\\text {free-YM}}(d)=\\mathrm{dim}(G)F_{\\mathrm{Max}}(d)-\\frac{\\mathrm{dim}(G)}{2} \\log \\left({ g_0^2 R^{4-d}}\\right)+\\log\\frac{\\mathrm{vol}(G)}{(2\\pi)^{\\text{dim}(G)}}\\,,\n\\label{eq:s}\n\\ee\nwhere $F_{\\mathrm{Max}}(d)$ reads\n\\be\n\\begin{aligned}\nF_{\\mathrm{Max}}(d)=&\\frac{1}{2}\\log(2\\pi(d-1)^{2}\\Omega_d)-\\frac{1}{\\sin \\left(\\frac{\\pi d}{2}\\right)} \\int_{0}^{1} du\\biggl((2 u-d) \\sin \\left(\\frac{\\pi}{2}(d-2 u)\\right) \\frac{\\Gamma(d-u) \\Gamma(u)}{\\Gamma(d+1)}\\\\&+\\left(d^{2}+1-3 d(1+u)+2 u(u+2)\\right) \\frac{\\sin \\left(\\frac{\\pi}{2}(2 u-d)\\right)\\Gamma(d-2-u) \\Gamma(1+u)}{2 \\Gamma(d)}\\\\&+\\frac{\\sin \\left(\\frac{\\pi d}{2}\\right)(d-2)}{(d-2)^{2}-u^{2}} +\\frac{\\sin \\left(\\frac{\\pi d}{2}\\right)}{u}\\biggr)\\,.\n\\label{eq:t}\n\\end{aligned}\n\\ee\n\\section{Feynman rules on the sphere}\n\\label{rules}\n\nIn this section we discuss the Feynman rules on $S^d$ for non-abelian gauge theories. We start by reviewing some preliminary notions on maximally symmetric spaces. We then generalize the computation of the vector propagator presented in \\cite{Allen_1986} in the Feynman gauge to an arbitrary $\\xi$-gauge. The ghost propagator requires some care in order to remove the zero mode, while the propagator of the Dirac fermion is computed by a Weyl transformation from flat space. We then derive the Feynman rules for the vertices. \n\n\\subsection{Bitensors in maximally symmetric spaces}\n\\label{prel}\n\nThe two-point function of a spinning operator in a curved space $M$ defines a bitensor, namely a bilocal function that is a tensor with respect to both of its arguments. In maximally symmetric spaces bitensors can be expressed as sums and products of a few building blocks.\nLet us briefly review these building blocks following ref.~\\cite{Allen_1986}. Starting with the geodesic distance $\\mu(x,x')$, which is a biscalar, other basic geometric objects are the parallel propagator $h^\\nu_{\\ \\lambda'}(x,x')$ transporting vectors along geodesics from $x$ to $x'$, and the unit vectors $n_\\nu(x,x')$ and $n_{\\nu'}(x,x')$ tangent to the geodesic at $x$ and $x'$ respectively:\n\\begin{equation}\nn_{\\nu}\\left(x, x^{\\prime}\\right)=\\nabla_{\\nu} \\mu(x, x^{\\prime}) \\quad \\text { and } \\quad n_{\\nu^{\\prime}}\\left(x, x^{\\prime}\\right)=\\nabla_{\\nu^{\\prime}} \\mu\\left(x, x^{\\prime}\\right).\n\\end{equation} \n$h^\\nu_{\\ \\lambda'}(x,x'), n_\\nu(x,x')$ and $n_{\\nu'}(x,x')$ are examples of bitensors. We use the following notation: a bitensor $(n,m)$ is a rank $n$ tensor at $x$ and a rank $m$ tensor at $x'$. So for instance $h^\\nu_{\\ \\lambda'}(x,x'), n_\\nu(x,x')$ and $n_{\\nu'}(x,x')$ are respectively $(1,1)$, $(1,0)$ and $(0,1)$ bitensors.\nIn general objects written as the contraction of two bitensors depend on both $x$ and $x'$, even if they contain only primed or unprimed indices.
An exception is the following identity relating the metric $h_{ \\nu \\lambda}$ to the parallel propagator\n\\begin{equation}\nh_{\\nu \\lambda}(x)=h_{\\nu}^{\\ \\rho^{\\prime}}(x, x^{\\prime}) h_{\\rho^{\\prime} \\lambda}\\left(x^{\\prime}, x\\right)\\,,\n\\end{equation}\nand similarly for $h_{\\nu' \\lambda'}(x')$. Covariant derivatives of bitensors can be taken with respect to either $x$ or $x'$ and are denoted by $\\nabla_\\nu$ and $\\nabla_{\\nu'}$ respectively. \n\nIt is possible to prove that any bitensor in a maximally symmetric space can be expressed as sums and products of the building blocks $h_{\\nu \\lambda}$, $h_{\\nu '\\lambda'}$, $n_\\nu$, $n_{\\nu'}$ and $h_{\\nu \\lambda'}$, with coefficients that are only functions of $\\mu$. This provides a remarkable simplification in finding the structure of propagators and their explicit expressions. \n\nLet us list some properties, useful for the derivation of propagators:\n\\begin{equation}\n\\begin{aligned}\n\\nabla_{\\nu} n_{\\lambda} & =A\\left(h_{\\nu \\lambda}-n_{\\nu} n_{\\lambda}\\right)\\,, \\\\\n\\nabla_{\\nu} n_{\\lambda^{\\prime}} & =C\\left(h_{\\nu \\lambda^{\\prime}}+n_{\\nu} n_{\\lambda^{\\prime}}\\right)\\,, \\\\\n\\nabla_{\\nu} h_{\\lambda \\rho^{\\prime}} & =-(A+C)\\left(h_{\\nu \\lambda} n_{\\rho^{\\prime}}+h_{\\nu \\rho^{\\prime}} n_{\\lambda}\\right),\n\\end{aligned}\n\\label{prop}\n\\end{equation}\nwhere\n\\begin{equation}\n\\begin{aligned}\n& A(\\mu)= \\frac{1}{R} \\cot (\\mu / R) \\,, \\\\&\n C(\\mu)= -\\frac{1}{R} \\frac{1}{\\sin (\\mu / R)}\\,,\n \\label{eq:A&C}\n\\end{aligned}\n\\end{equation}\nwhere $R$ is the radius, defined in terms of the constant value of the Ricci curvature scalar in eq.~\\eqref{eq:curvScala}. For future convenience it is useful to introduce the variable \n\\begin{equation}\nz(x,x')\\equiv \\cos^2\\left(\\frac{\\mu(x,x')}{2R}\\right)\\,,\n\\label{eq:zed}\n\\end{equation}\nwhich is related to the chordal distance between the two points.\n\nLet us now specialize to a sphere $S_R^d$ of radius $R$. Using stereographic coordinates $x^\\mu$ we write the metric as\n\\begin{equation}\nds^2= h_{\\mu \\nu} dx^\\mu dx^\\nu \\,, \\qquad h_{\\mu \\nu}=\\frac{4 R^{4}}{\\left(R^{2}+|x|^{2}\\right)^{2}} \\delta_{\\mu \\nu}~.\n\\label{eq:stereo}\n\\end{equation}\nThe geodesic distance is given by the following identity\n\\begin{equation}\\label{eq:zdef}\n\\cos \\left(\\frac{\\mu(x, x')}{R}\\right) =\n1-\\frac{2 R^{2}|x-x'|^{2}}{\\left(R^{2}+|x|^{2}\\right)\\left(R^{2}+|x'|^{2}\\right)} = 2z(x,x')-1\\,.\n\\end{equation}\nWhen $x'=0$, we denote for simplicity\n\\be\nz\\equiv z(x,0) = \\frac{R^2}{R^2+x^2}\\,.\n\\label{eq:zDef}\n\\ee\nThe variable $z$ will be useful to write propagator expressions and, in particular, their expansion around coincident points. \n\n\\subsection{Vector propagator on $S^d$}\n\\label{sec:gaugeprop}\n\nVector propagators on $d$-dimensional maximally symmetric spaces have been computed in \\cite{Allen_1986}. For our purposes we need the propagator of a massless vector field on the sphere.
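\nBefore deriving it, it is useful to check eq.~\\eqref{eq:zdef} explicitly, since $z$ will be used repeatedly below. In the coordinates \\eqref{eq:stereo} the sphere is embedded in $\\mathbb{R}^{d+1}$ as\n\\be\nX^i=\\frac{2R^2 x^i}{R^2+|x|^2}\\,,\\qquad X^{d+1}=R\\,\\frac{|x|^2-R^2}{R^2+|x|^2}\\,,\\qquad X\\cdot X=R^2\\,,\n\\ee\nand the geodesic distance satisfies $\\cos\\left(\\mu(x,x')/R\\right)=X\\cdot X'/R^2$. A one-line computation then gives\n\\be\n\\frac{X\\cdot X'}{R^2}=\\frac{4R^2\\,x\\cdot x'+(|x|^2-R^2)(|x'|^2-R^2)}{(R^2+|x|^2)(R^2+|x'|^2)}=1-\\frac{2R^2|x-x'|^2}{(R^2+|x|^2)(R^2+|x'|^2)}\\,,\n\\ee\nwhich is eq.~\\eqref{eq:zdef}; setting $x'=0$, i.e. $X'=(0,\\dots,0,-R)$, reproduces eq.~\\eqref{eq:zDef}.\n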
It follows from the quadratic part of the gauge action\n\\begin{equation}\n S_\\text{free-YM}= \\int d^{d} x \\sqrt{h}\\frac{1}{2 g_{0}^{2}}\\mathrm{Tr}\\left[A_{\\nu}(-\\delta^\\nu_\\mu\\nabla^2+R^\\nu_\\mu+\\left(1-\\frac{1}{\\xi}\\right)\\nabla^\\nu\\nabla_\\mu) A^\\mu\\right],\n \\label{eqn:1}\n\\end{equation}\nthat the vector propagator \n$\nQ_{\\nu \\lambda'}^{ab}(x,x')= \\langle A^a_\\nu (x) A^b_{\\lambda'} (x') \\rangle = g_0^2\\delta^{ab}Q_{\\nu \\lambda'}(x,x')\n$\nsatisfies the equation\n\\begin{equation}\n\\left(-h^{\\mu \\nu}\\nabla^2 +R^{\\mu\\nu}+\\left(1-\\frac{1}{\\xi}\\right)\\nabla^\\mu\\nabla^\\nu\\right)Q_{\\nu \\lambda'}(x,x')=\\delta(x-x')h^\\mu _{ \\lambda'}~.\n\\label{eqn:44}\n\\end{equation}\nThe propagator $Q_{\\nu \\lambda'}(x,x')$ is a maximally symmetric $(1,1)$ bitensor and can be decomposed as\n\\begin{equation}\nQ_{\\nu \\lambda'}(x,x')=\\alpha(\\mu)h_{\\nu \\lambda'}+\\beta(\\mu) n_{\\nu}n_{\\lambda'},\n\\label{eq:gaugeprop}\\end{equation}\nwhere $\\alpha$ and $\\beta$ are generic functions of the geodesic distance. Their expression is found in eqs.~\\eqref{eq:alphasol},~\\eqref{eq:alphatilda} and \\eqref{eq:betaExp} in appendix \\ref{app:prop}, where the interested reader can also find their detailed derivation.\n\n\n\\subsection{Ghost propagator on $S^d$}\n\\label{sec:sphere}\n\nThe ghost propagator $G^{ab}(x,x') =\\langle c^{\\prime\\,a}(x) \\bar{c}^{\\prime\\,b}(x') \\rangle$ satisfies\n\\begin{equation}\n\\nabla^2G^{ab}(x,x')=\\delta(x-x')\\delta^{ab}~.\n\\end{equation}\nAs explained in section \\ref{sec:Landau}, $c'$ has the zero mode removed, so we need to subtract the constant part from this propagator. This is also clear from the expansion of the propagator in terms of the spherical harmonics~\\eqref{eq:f2}:\n\\begin{equation}\nG^{ab}(x,x')=\\sum_{\\ell> 0}\\frac{R^2 }{-\\ell(\\ell+d-1)}Y_{\\ell}(x)Y_{\\ell}(x')\\delta^{ab}~,\n\\label{eqn:3}\n\\end{equation}\nwhere the constant mode $\\ell=0$ is excluded from the sum, which would otherwise give a divergence.
In order to resum this expression we need to introduce a small regulator, as we did for the one-loop computation of the free energy:\n\\begin{equation}\nG^{ab}(x,x')=\\lim_{\\delta \\rightarrow 0}\\Biggl [\\sum_{\\ell\\ge 0}\\frac{R^2 Y_{\\ell}(x)Y_{\\ell}(x')}{-\\ell(\\ell+d-1)+\\delta(d-1+\\delta)}-\\frac{R^2 Y_{0}^2}{\\delta(d-1+\\delta)}\\Biggr]\\delta^{ab} \\,.\n\\label{eqn:24}\n\\end{equation} \nThe first term corresponds to the propagator $G_{\\mathrm{reg}}(x, x')$ associated to a scalar field with mass $m^2=\\delta(d-1+\\delta)/R^2$, whose expression as a function of $z$ is\n\\begin{equation}\nG_{\\mathrm{reg}}(z;\\delta)=-\\frac{\\Gamma(d-1+\\delta)}{4(4\\pi)^{\\frac{d}{2}-1}R^{d-2}\\Gamma(1+\\delta)\\sin(\\pi \\delta)\\Gamma(\\frac{d}{2})} {}_2F_1\\Big(-\\delta,-1+d-\\delta,\\frac{d}{2},z\\Big).\n\\end{equation}\nPlugging in eq.~\\eqref{eqn:24} and taking the limit $\\delta\\rightarrow 0$, we find a well-defined expression for the ghost propagator:\n\\be\\begin{split}\nG^{ab}(z) = \\delta^{ab}G(z)= & \\frac{\\delta^{ab}}{4(4\\pi)^{\\frac{d}{2}-1}R^{d}\\sin(\\pi d)\\Gamma(2-d)\\Gamma(\\frac{d}{2})}\\Big(H(d-2) \\\\ \n& -\\frac{2(d-1)z}{d} {}_3F_2\\big(1,1,d;2,1+\\tfrac{d}{2};z\\big)\\Big) \\,,\n\\end{split}\n\\label{eqn:13b}\n\\ee\nwhere $H$ denotes the harmonic number, which can be written in terms of the digamma function $\\psi$ and the Euler constant $\\gamma$ as\n\\be\nH(x) = \\gamma+ \\psi(x+1) \\,.\n\\ee\n\n\\subsection{Fermion propagator on $S^d$}\nThe fermion propagator on $S^d$ is easily computed from its known expression in flat space by performing a Weyl rescaling, see eq.~\\eqref{eq:stereo}.\nWe have\n\\begin{equation}\n S^{i}_{ j}(x,0)=\\langle{\\psi}^i(x)\\bar{\\psi}_j(0)\\rangle_\\text{sphere}=\\frac{\\langle{\\psi}^i(x)\\bar{\\psi}_j(0)\\rangle_\\text{flat}}{\\Omega(x)^\\frac{d}{2}\\Omega(0)^\\frac{d}{2}}=\\delta^i_{ j}\\frac{\\Gamma\\left(\\frac{d}{2}\\right)(R^2+x^2)^\\frac{d}{2}\\gamma^\\mu x_\\mu}{2^{(d+1)}\\pi^{\\frac{d}{2}}\\left(x^{{2}}\\right)^{\\frac{d}{2}}R^d}\\, ,\n\\end{equation}\nwhere in the last equality we used\n\\begin{equation}\n \\langle{\\psi}^i(x)\\bar{\\psi}_j(0)\\rangle_\\text{flat}=\\delta^i_{ j}\\frac{\\Gamma\\left(\\frac{d}{2}\\right)\\gamma^\\mu x_\\mu}{2\\pi^{\\frac{d}{2}}\\left(x^{{2}}\\right)^{\\frac{d}{2}}}\\,,\n \\qquad \\Omega(x)=\\frac{2R^2 }{R^2+x^{2}}\\,.\n\\end{equation}\n\n\n\\subsection{Vertices on the sphere}\n\nThe Feynman rules for the vertices can be read from the interacting part of the gauge-fixed action. Namely, we have four possible interactions defined as\n\\begin{align}\ng_0\\Gamma^{\\mathrm{TR}}(x)&=-\\frac{1}{g_0^2}f^{abc}\n\\nabla_\\nu A_\\lambda^aA_\\nu^bA_\\lambda^c(x)\\, ,\\\\\ng_0^2\\Gamma^{\\mathrm{QU}}(x)&=-\\frac{1}{4g_0^2}f^{abc}f^{ade}\ng_0A_\\nu^bA_\\lambda^c A_\\nu^dA_\\lambda^e(x)\\, , \\\\\ng_0\\Gamma^{\\mathrm{GH}}(x)&=f^{abc}\n\\nabla_\\nu\\bar{c}^{\\prime a}A_\\nu^bc^{\\prime c}(x)\\, , \\\\\ng_0\\Gamma^{\\mathrm{FE}}(x)&=T^a _{\\alpha\\beta}\\bar{\\psi^\\alpha_i}\\gamma^\\mu\\psi^\\beta_i A^a_\\mu(x) \\, ,\n\\end{align}\nrespectively the triple gluon, the quartic gluon, the ghost-gluon and the fermion-gluon interactions. \n\n\\section{Next to leading contribution}\nIn the previous section we have obtained the Feynman rules for gauge theories on the sphere. 
We now have all the ingredients to compute the free energy at the next-to-leading order.\nFor $n_f$ Dirac fermions in the fundamental representation of the gauge group $G=SU(n_c)$ we have\n\\begin{equation}\n\\begin{split}\nF= & (n_c^2-1)F_{\\mathrm{Max}}(d) -\\frac{1}{2}(n_c^2-1)\\log \\left({ g_0^2 R^{4-d}} \\right)+\\log\\left(\\frac{\\mathrm{vol}(SU(n_c))}{(2\\pi)^{n_c^2-1}}\\right)\\\\\n& +n_f n_c F_\\text{free-ferm}+F_\\mathrm{curv} -\\frac{1}{2} g_0^2G_2+\\dots,\n\\end{split}\n\\label{eqn:31}\n\\end{equation}\nwhere $G_2$ includes all the two-loop vacuum diagrams. Note that we have kept all the couplings bare. In section \\ref{sec:diagrams} we compute the various diagrams contributing to $G_2$ in eq.~\\eqref{eqn:31}: the divergent terms in a generic $\\xi$-gauge\nand the finite pieces in the Landau gauge $\\xi =0$. Renormalization is discussed in section \\ref{count1}. As a check of the validity of our results we verify in appendix \\ref{app:jack} that the divergences that we obtain match with those computed with heath-kernel methods in ref.~\\cite{Jack:1982sn} in the Feynman gauge $\\xi=1$. \n\n\\subsection{Computation of the diagrams}\n\\label{sec:diagrams}\nThe leading interacting part of the free energy is given by connected vacuum diagrams up to order $g_0^2$. The corresponding contribution, which we will call $G_2$, is composed by the following two-loop diagrams:\n\\begin{equation}\n G_2=G_2^{\\mathrm{triple}}+G_2^{\\mathrm{ghost}}+G_2^{\\mathrm{ferm}}+G_2^{\\mathrm{quart}}+G_2^{\\mathrm{CT-vec}}+G_2^{\\mathrm{CT-gh}}\\,.\n \\label{eq:G2tot}\n\\end{equation}\nThe first four terms in \\eqref{eq:G2tot} are genuine two-loop graphs: \n\\begin{align}\n\\begin{split}\n&G_2^{\\mathrm{triple}}= \\ \\feynmandiagram[baseline=(a.base)][horizontal=a to b] {\na -- [ gluon, half left] b -- [ gluon, half left] a--[gluon]b\n}; \\ =\\int d^d x d^d x' \\sqrt{h}\\sqrt{h'}\\langle \\Gamma^{\\mathrm{triple}}(x) \\Gamma^{\\mathrm{triple}}(x') \\rangle \n\\, , \\\\\n&G_2^{\\mathrm{ghost}}=\\feynmandiagram[baseline=(a.base)][horizontal=a to b] {\na -- [charged scalar, half left] b -- [charged scalar, half left] a--[gluon]b\n};\\quad = \\int d^d x d^d x'\\sqrt{h}\\sqrt{h'} \\langle \\Gamma^{\\mathrm{ghost}}(x) \\Gamma^{\\mathrm{ghost}}(x') \\rangle \n\\, ,\\\\\n &G_2^{\\mathrm{ferm}}=\\feynmandiagram[baseline=(a.base)][horizontal=a to b] {\na -- [fermion, half left] b -- [fermion, half left] a--[gluon]b\n};\\, =\\int d^d x d^d x'\\sqrt{h}\\sqrt{h'} \\langle \\Gamma^{\\mathrm{fermion}}(x) \\Gamma^{\\mathrm{fermion}}(x') \\rangle \n\\, ,\\\\\n&G_2^{\\mathrm{quart}}=\\begin{tikzpicture}[baseline=(a.base)]\n\\begin{feynman}\\vertex (a) ;\n\\vertex [left=0.2cm of a] ;\n \\diagram* {a -- [gluon, out=45, in=-45, loop, min distance=1.2cm] a-- [gluon, out=-135, in=135, loop, min distance=1.2cm]a\n };\n \\end{feynman}\n \\end{tikzpicture}\\,=2\\int d^d x\\sqrt{h} \\langle \\Gamma^{\\mathrm{quart}}(x) \\rangle \n \\,.\n\\end{split}\n\\label{eq:diagram}\n\\end{align}\nThe last two ones are instead one-loop graphs with (one-loop) counterterm insertions:\n\\begin{eqnarray}\n&&G_2^\\text{CT-vec}= \\feynmandiagram[baseline=(a.base)][horizontal=a to b] {\na [crossed dot]]-- [gluon, half left] b -- [ gluon, half left] a\n};\\ = -2 \\delta_L\\ \\int d^d x \\langle \\frac{1}{2 \\xi}\\left(\\nabla_\\mu A^{\\mu a}(x)\\right)^2 \\rangle \n-2\\delta_T\\ \\int d^d x \\langle \\frac{1}{4 } \\left(\\nabla_\\mu A^a_\\nu(x) -\\nabla_\\nu A^a_\\mu(x) \\right)^2 \\rangle \n\\,, \\nonumber \\\\ \n&&G_2^\\text{CT-gh}= 
\\feynmandiagram[baseline=(a.base)][horizontal=a to b] {\na [crossed dot]]-- [charged scalar, half left] b -- [ charged scalar, half left] a\n};\\ =-2\\delta_c\\int d^d x \\langle \\left(\\bar{c}_a(x)\\nabla^2 c^a(x)\\right)\n\\rangle \n\\,.\n\\label{eq:diagram2}\n\\end{eqnarray}\nThese counterterms are defined from the renormalized Lagrangian\n\\begin{equation}\\label{eq:counterdef}\n\\frac{Z_T}{Z_{g^2} g^2} \\frac{1}{4 } \\left(\\nabla_\\mu A^a_\\nu(x) -\\nabla_\\nu A^a_\\mu(x) \\right)^2 + \\frac{Z_L}{2 g^2 \\xi}\\left(\\nabla_\\mu A^{\\mu a}(x)\\right)^2 - \\frac{Z_T^{3/2}}{Z_{g^2} g^2} f^{abc} \\nabla_\\mu A_\\nu^a A^{\\mu b} A^{\\nu^c} +\\dots~,\n\\end{equation}\nwhere $g_0^2 =Z_g^2 g^2$ is the relation between the bare and the renormalized coupling and $Z_{\\bullet} = 1 + \\delta_\\bullet g^2$. Thanks to the vector equations of motion, we have\n\\be\n\\int d^d x \\langle \\frac{1}{4 } \\left(\\nabla_\\mu A^a_\\nu(x) -\\nabla_\\nu A^a_\\mu(x) \\right)^2 \\rangle \n= - \n\\int d^d x \\langle \\frac{1}{2 \\xi}\\left(\\nabla_\\mu A^\\mu_a(x)\\right)^2 \\rangle + \\mathcal{O}(g)~,\n\\ee\nmodulo a $\\delta^{(d)}(0)$ factor, which vanishes in dimensional regularization.\nThe counterterms $\\delta_T$ and $\\delta_L$ entering the vector propagator can be computed in flat space and they read\n(see e.g. \\cite{Schwartz:2014sze}\\footnote{Comparing our Lagrangian \\eqref{eq:counterdef} with the definitions in section 26.5 of \\cite{Schwartz:2014sze}, we see that the relation between our counterterms and the counterterms $\\delta_3$ and $\\delta_{A^3}$ defined there are: $g^2 \\delta_T = \\delta_{A^3} -\\delta_3$, and $g^2 \\delta_{g^2} = \\delta_{A^3} -\\frac 32 \\delta_3$. Moreover since there is no correction proportional to the longitudinal part of the propagator, $\\delta_L = 0$. Note also that $\\epsilon_{\\text{there}} = -2 \\epsilon_{\\text{here}}$.})\n\\be\n\\delta_L = 0\\,, \\qquad \\delta_T = C_A\\frac{3+\\xi}{32\\pi^2\\epsilon}\\ \\big(1+\\mathcal{O}(g_0^2)\\big)\\,,\n\\ee\nwith $C_A=n_c$ for the $SU(n_c)$ group. The presence of the ghost counterterm is instead a peculiarity of $S^d$, consequence of the removal of zero modes from the propagator. We refer to appendix \\ref{app:ghost} for its computation. The final result is \n\\be\\delta_c=-C_A \\frac{3-\\xi }{64\\pi^2\\epsilon}\\ \\big(1+\\mathcal{O}(g_0^2)\\big)\\,.\n\\label{eq:count}\n\\ee\n\nApplying Wick's contraction and the previously listed Feynman rules to eq.~\\eqref{eq:diagram}, we get \n\\begin{align}\n&G_2^\\text{triple}=\\ \\kappa \\int d^{d} x \\ d^{d} x' \\sqrt{h} \\sqrt{h'}\\ \\left( \\nabla^\\mu\\nabla^{\\mu^\\prime} Q^{\\nu \\nu'}(Q_{\\mu\\mu'}Q_{\\nu\\nu'}-Q_{\\mu\\nu'}Q_{\\nu\\mu'} ) \\right. 
\\label{eq:g2triple} \\\\\n& \\quad \\quad+ \\nabla^\\nu Q^{\\mu \\mu '}(\\nabla^{\\nu '} Q_{\\nu\\mu'}Q_{\\mu\\nu'}-Q_{\\nu\\nu'}\\nabla^{\\nu'} Q_{\\mu\\mu'} )\\left.+ \\nabla_{\\nu} Q^{\\mu \\nu '}(\\nabla_{\\nu '} Q^{\\nu\\mu'}Q_{\\mu\\mu'}-Q^{\\nu\\mu'}\\nabla_{\\nu'} Q_{\\mu\\nu'} )\\right) \\,, \\nonumber\n\\\\\n&G_2^\\text{ghost}=\\ \\kappa \\int d^{d} x \\ d^{d} x' \\sqrt{h} \\sqrt{h'}\\ (\\nabla_\\mu G \\ \\nabla_{\\mu'}G \\ Q^{\\mu\\mu'})\\,,\n\\label{eq:g2ghost}\n\\\\\n&G_2^\\text{ferm}=-\\ n_f T_f \\left(n_c^{2}-1\\right) \\int d^{d} x \\ d^{d} x' \\sqrt{h} \\sqrt{h'}\\ (\\mathrm{Tr}\\left[\\gamma_\\mu S \\gamma_{\\mu'} S\\right] \\ Q^{\\mu\\mu'})\\,,\n\\\\\n&G_2^\\mathrm{quart}=-\\frac{\\kappa}{2} \\int d^{d} x \\sqrt{h}\\ (Q^{\\mu}_{\\ \\mu}Q^{\\nu}_{\\ \\nu}-Q_{\\mu \\nu}Q^{\\mu \\nu})\\,, \\label{eq:g2quartic}\n\\\\\n&G_2^\\text{CT-vec}=\\kappa \\frac{3+\\xi}{16\\pi^2\\epsilon}\\int d^{d} x \\sqrt{h}\\ \\left(\\frac{1}{2\\xi}\\nabla^\\mu\\nabla^\\nu Q^{\\mu}_{\\ \\nu}\\right)\\,, \\label{eq:g2ctvect}\n\\\\\n&G_2^\\text{CT-gh}=\\kappa \\frac{3-\\xi}{32\\pi^2\\epsilon} \\int d^{d} x \\sqrt{h}\\ (\\nabla^2 G)\\,. \\label{eq:g2ctghost}\n\\end{align}\nwhere $T_f=1/2$ for the fundamental representation and we have defined\n\\be\n\\kappa=C_{A}\\left(n_c^{2}-1\\right)\\,.\n\\ee\nNote that the first term in the triple diagram \\eqref{eq:g2triple} includes a double derivative of the vector propagator, which should be treated with care, because it contains a term proportional to a $\\delta$-function at coincident points, which contributes to the integral. A simple way to circumvent this problem consists in integrating by parts the first term of eq.~\\eqref{eq:g2triple} getting\n\\begin{align}\nG_2^\\text{triple}&=\\ \\kappa \\int d^{d} x \\ d^{d} x' \\sqrt{h} \\sqrt{h'}\\ \\left( \\nabla^{\\mu^\\prime} Q^{\\nu \\nu'}\\nabla^\\mu(-Q_{\\mu\\mu'}Q_{\\nu\\nu'}+Q_{\\mu\\nu'}Q_{\\nu\\mu'} ) \\right. \\\\& + \\nabla^\\nu Q^{\\mu \\mu '}(\\nabla^{\\nu '} Q_{\\nu\\mu'}Q_{\\mu\\nu'}-Q_{\\nu\\nu'}\\nabla^{\\nu'} Q_{\\mu\\mu'} ) \\left.+ \\nabla_{\\nu} Q^{\\mu \\nu '}(\\nabla_{\\nu '} Q^{\\nu\\mu'}Q_{\\mu\\mu'}-Q^{\\nu\\mu'}\\nabla_{\\nu'} Q_{\\mu\\nu'} )\\right)\\,.\\nonumber\n\\end{align}\nWe refer to appendix \\ref{app:contact} for more details on how to treat contact terms and integration by parts on $S^d$ in presence of delta function singularities.\n\nFor the first three integrals ($t=$ triple, $g=$ ghost, $f=$ fermion) we proceed as follows. As the integrals only depend on the geodesic distance, or equivalently on $z$, we can use spherical invariance to put $x'$ to zero and reduce the integration over $x'$ to a volume factor: \n\\begin{equation}\n G_2^i=\\int d^d x d^d x' \\sqrt{h}\\sqrt{h'}\\ g^i\\left(z\\right)=\\Omega_dR^d\\int d^d x \\sqrt{h}\\ g^i\\left(z\\right)\\,,\\quad i=t,g,f\\,.\n\\end{equation}\nThen we use stereographic coordinates to convert the remaining integral in $x$ to a one-dimensional integral in the variable $z$ defined in eq.~\\eqref{eq:zDef}:\n\\begin{equation}\n\\int d^d x \\sqrt{h}= \\Omega_{d-1}R^{2d}\\int_0^\\infty dx \\frac{2^dx^{d-1}}{(R^2+x^2)^d} \\,. \n\\end{equation}\nIn this way we write \n\\begin{equation}\n G_2^i=\\int_0^1 dz\\ f^i(z)\\,, \\quad i=t,g,f\\,,\n \\label{eq:fiint}\n\\end{equation}\nfor some functions $f^i(z)$. The integral \\eqref{eq:fiint} cannot be computed directly as it contains UV divergences in $d=4$. We isolate them by expanding $f^i(z)$ around coincident points, i.e. 
$z=1$:\n\\begin{equation}\n f^i(z)=\\sum_{k=n_i}^{N_i}\\left(f_{1k}^i(d)(1-z)^{k-1}+f_{2k}^i(d)(1-z)^{k-d/2+1}+f_{3k}^i(d)(1-z)^{k-d+3}\\right)+\\tilde{f^i}(z)\\, ,\n \\label{eq:fiSum}\n\\end{equation}\nwhere $f_{jk}^i(d)$ are analytic functions of $d$ and $\\tilde{f}^i$ remainder terms. The lower bound $n_i$ in the sum appearing in eq.~\\eqref{eq:fiSum} is the leading UV divergence of the integrand, and the upper bound $N_i$ is chosen in such a way that the integral of $\\tilde{f^i}(z)$ over $z$ between 0 and 1 is finite. \nWe write\n\\begin{equation}\nG_2^i=(G_2^{i})_{N_i}+\\widetilde{G_2^{i}},\n\\end{equation}\nwith\n\\begin{equation}\n (G_2^{i})_{N_i}=\\int_0^1 dz\\ \\sum_{k=n_i}^{N_i}\\left(f_{1k}^i(d)(1-z)^{k-1}+f_{2k}^i(d)(1-z)^{k-d/2+1}+f_{3k}^i(d)(1-z)^{k-d+3}\\right)\n \\label{eq:G2Ni}\n \\end{equation}\n and\n \\begin{equation}\n\\widetilde{G_2^i}=\\int_0^1 dz\\ \\tilde{f^i}(z)\\,,\n\\end{equation}\nwith $\\widetilde{G_2^i}$ finite. The integral $(G_2^{i})_{N_i}$ can be computed analytically using\n\\begin{equation}\n \\int_0^1(1-z)^{a-1}=\\frac{1}{a}\\, ,\n \\label{eq:maintool}\n\\end{equation}\nwhich is valid for $a>0$, but is extendable to any $d$-dependent $a$ by analytic continuation in $d$.\\footnote{Luckily, $f^i_{1k}(d)$ is zero for $k\\le 0$ in all the integrals that we have computed. Otherwise, analytic continuation of the dimension would not be sufficient to regulate the integral of eq.~\\eqref{eq:maintool}. }\nWe then set $d=4+2\\epsilon$ and extract the divergent part of eq.~\\eqref{eq:G2Ni} by expanding the result in powers of $\\epsilon$ and isolating the negative powers of $\\epsilon$. Note that the divergence of the integral has a double source: it comes from both integration over $z$ when $k=0$ and from the expansion of the functions $f_{jk}^i(d)$ around $d=4$.\\footnote{The functions $f^i_{jk}(d)$ remain separately divergent $k>0$, but for $k>N_i$ these divergences cancel when the $j = 1, 2, 3$ contributions are\nsummed up.} This explains the presence of double poles in the final result.\n\nFor the quartic and the counterterm diagrams the situation is simpler, as we have an integration over a single variable. Spherical invariance then means that we need to compute the integrand at coincident points and multiply it by a volume factor. \nWe work out the procedure for the quartic case \\eqref{eq:g2quartic} as example. We have\n\\begin{equation} \n G_2^\\mathrm{quart}=2\\kappa R^d\\Omega_d\\left. \\frac{\\alpha(z) (d-1)(2 \\beta(z) - d \\alpha(z))}{\n 4z^2}\\right|_{z\\rightarrow1}\\,,\n \\label{eq:G2quarticExp}\n\\end{equation}\nwhere $\\alpha$ and $\\beta$ are the coefficients of the two components of the vector propagator defined in eq.~\\eqref{eq:gaugeprop}.\nFor physical values of $d$, eq.~\\eqref{eq:G2quarticExp} is UV divergent. \nWe expand it around coincident points for generic $d$, obtaining\n\\begin{equation}\n G_2^\\text{quart}=\\left.\\ \\sum_{k=0}^{N}\\left(g_{1k}^\\text{q}(d)(1-z)^{k}+g_{2k}^\\text{q}(d)(1-z)^{-d/2+1+\n k}+g_{3k}^\\text{q}(d)(1-z)^{-d+2+k}\\right)\\right|_{z\\rightarrow1}\\,,\n \\end{equation}\n where $N\\ge1$ and $g_{jk}^\\text{q}$ are analytic functions of $d$. For sufficiently small $d$ all terms in the expansion vanish, except $g_{1k}^\\text{q}$, with $k=0$. 
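\nAs a simple illustration of eq.~\\eqref{eq:maintool}, the two $k=0$ structures appearing in eq.~\\eqref{eq:G2Ni} give, for $d=4+2\\epsilon$,\n\\be\n\\int_0^1 dz\\,(1-z)^{1-\\frac{d}{2}}=\\frac{1}{2-\\frac{d}{2}}=-\\frac{1}{\\epsilon}\\,,\\qquad \\int_0^1 dz\\,(1-z)^{3-d}=\\frac{1}{4-d}=-\\frac{1}{2\\epsilon}\\,,\n\\ee\nwhich shows how the $z$-integration generates simple poles in $\\epsilon$; the expansion of the coefficients $f^i_{jk}(d)$ around $d=4$ can then promote them to double poles, as noted above.\n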
We then get\n \\begin{eqnarray}\n G_2^\\mathrm{quart}=g_{10}^\\text{q}(d)= && \\hspace{-.5cm} -\\frac{ \\kappa R^{d-4}\\Gamma (d-1)}{2^{d+2} \\pi ^{\\frac{d}{2}}(d-3)^2 \\Gamma \\left(\\frac{d}{2}+1\\right)} \\left(\\gamma (d-3) \\xi +\\pi ((d-3) \\xi -d+1) \\cot \\Big(\\frac{\\pi d}{2}\\Big)\\right. \\nonumber \\\\\n && \\left.+\\big(d (\\xi -1)-3 \\xi +1\\big) \\psi(d)-\\gamma d+d+\\gamma \\right)^2 \\,.\n \\label{eq:g2QanaD}\n\\end{eqnarray}\nThe analytic continuation of eq.~\\eqref{eq:g2QanaD} for any $d$ gives us the final result.\nA similar computation of the integrals in eqs.~\\eqref{eq:g2ctvect} and \\eqref{eq:g2ctghost} gives just $-1$ and $-1/2$, respectively, for any $d$.\n\n\nWe finally expand eqs.~\\eqref{eq:g2triple}-\\eqref{eq:g2ctghost} around $\\epsilon=0$ with $d=4+2\\epsilon$, keeping terms up to constant order, and we get:\n\\begin{align}\n& \\!\\!\\! G_2^{\\mathrm{triple}}\\Big|_\\text{div.}\\!\\!\\! =\\kappa \\left( \\frac{(\\xi -3) (3 \\xi -7)}{192 \\pi ^2 \\epsilon ^2}\n+\\frac{\\xi (31 \\xi -64)-71-2 (\\xi -3) (3 \\xi -7) (\\gamma +\\log (4 \\pi R^2))}{384 \\pi ^2\n\\epsilon }\\right) \\,, \\label{eq:triple} \\\\\n& \\!\\!\\! G_2^{\\mathrm{ghost}}\\Big|_\\text{div.} =\\kappa \\left(\\frac{3-\\xi }{96 \\pi ^2 \\epsilon ^2}+\\frac{-\\xi -13+2 (\\xi -3) (\\gamma +\\log (4 \\pi R^2))}{192 \\pi ^2 \\epsilon }\\right) \\,,\n\\label{eq:ghost}\\\\\n& G_2^{\\mathrm{ferm}}\\Big|_\\text{div.} = (n_c^2-1)\\frac{n_f T_f}{6 \\pi ^2 \\epsilon } \\,, \\\\\n& \\!\\!\\! G_2^{\\mathrm{quart}}\\Big|_\\text{div.} =\\kappa \\left(-\\frac{(\\xi -3)^2}{64 \\pi ^2 \\epsilon ^2}+\\frac{(3-\\xi)(3+31\\xi) +6 (\\xi -3)^2 (\\gamma +\\log (4 \\pi R^2))}{384 \\pi^2 \\epsilon }\\right) \\,,\n\\label{eq:quartic}\\\\\n& G_2^{\\mathrm{CT-vec}}\\Big|_\\text{div.} =-\\kappa\\frac{3+ \\xi}{32 \\pi^2 \\epsilon} \\,, \\label{eq:CTvect} \\\\\n & \\!\\! G_2^{\\mathrm{CT-gh}}\\Big|_\\text{div.} =\\kappa \\frac{3 - \\xi}{32 \\pi^2 \\epsilon}\\,.\n\\label{eq:ghostcount}\n\\end{align}\nSumming all the contributions gives \n\\begin{equation}\n \\left. G_2\\right|_\\mathrm{div.}=-(n_c^2-1)\\frac{11C_A-4n_f T_f}{48 \\pi^2 \\epsilon}\\ .\n \\label{eq:g2finalpole}\n\\end{equation}\nNote that the results in eq.~\\eqref{eq:triple}-\\eqref{eq:quartic} have double poles, which cancel in the sum. Moreover, after summation the $\\xi$-dependence of $G_2$ cancels, as required from gauge invariance of the total free energy.\n\nAs explained before, we compute finite terms only in the Landau gauge $\\xi\\rightarrow0$. These are computed numerically.\nHowever, thanks to the integer-relation finding algorithm PSLQ \\cite{Bailey:1995}, we can obtain the exact result from the approximated one:\n\\begin{align}\n\\left.G_2^{\\mathrm{triple}}\\right|_\\text{fin.}&=\\kappa \\frac{-562 + 63 \\pi^2 + \n 6 (\\gamma + \\log(4 \\pi R^2)) (71 + \n 21 (\\gamma + \\log(4 \\pi R^2)))}{2304 \\pi^2}\n\\, , \\label{eq:G2tFinite} \\\\\n\\left.G_2^{\\mathrm{ghost}}\\right|_\\text{fin.}&=\\kappa \\frac{97 + 9 \\pi^2 + \n 6 (\\gamma + \\log(4 \\pi R^2)) (13 + \n 3 (\\gamma + \\log(4 \\pi R^2)))}{1152 \\pi^2}\\, , \\label{eq:G2gFinite} \\\\\n \\left. G_2^{\\mathrm{ferm}}\\right|_\\text{fin.}&= (n_c^2-1)n_fT_f\\frac{5+3 (\\gamma + \\log (4\\pi R^2))}{36 \\pi^2}, \\label{eq:G2fFinite} \\\\\n\\left.G_2^{\\mathrm{quart}}\\right|_\\text{fin.}&=\\kappa \\frac{128 - 9 \\pi^2 - \n 6 (\\gamma + \\log(4 \\pi R^2)) (1 + \n 3 (\\gamma + \\log(4 \\pi R^2)))}{256 \\pi^2}\\,,\n\\end{align}\nand zero for the counterterms, leading to \n\\begin{equation}\n \\left. 
G_2\\right|_\\mathrm{fin.}=(n_c^2-1)\\left(C_A\\frac{49 + 33 (\\gamma + \\log(4\\pi R^2))}{144 \\pi^2}-n_f T_f\\frac{5+3 (\\gamma + \\log (4\\pi R^2))}{36 \\pi^2}\\right) .\n \\label{eq:g2final}\n\\end{equation}\n\n\\subsection{Renormalization}\n\\label{count1}\n\nLet us now check that the free energy \\eqref{eqn:31} is UV finite up to order $g^2$ when expressed in terms of renormalized couplings.\nThe bare curvature couplings in eq.~\\eqref{eq:c3} renormalize as follows \\cite{Jack:1990eb}:\n\\begin{align}\nb_0&=\\mu^{2\\epsilon}\\left(b+\\frac{62(n_c^2-1)+11n_f n_c}{720(4\\pi)^2 \\epsilon}+\\mathcal{O}(g^4)\\right) \\,,\n \\label{eqn:4} \\\\\nc_0&=\\mu^{2\\epsilon}\\left(c+\\mathcal{O}(g^6)\\right)\\,,\n \\label{eqn:5}\n\\end{align}\nwhile for the gauge coupling we have the well-known relation\n\\be\ng_0^2=\\mu^{-2\\epsilon}\\Big(g^2+\\frac{11C_A-4n_f T_f}{3 \\epsilon} \\frac{g^4}{(4\\pi)^2}+\\mathcal{O}(g^6)\\Big)\\,,\n\\label{eq:g0Tog}\n\\ee\nwhere $\\mu$ is the RG sliding scale. Expanding in $\\epsilon$ for $d=4+2\\epsilon$, we get the following divergent contribution from eq.~\\eqref{eqn:31} at ${\\cal O}(g^0)$:\n\\be\\begin{split}\nF_\\text{free-YM}|_\\text{div.}& =-\\frac{31 (n_c^2-1)}{90\\epsilon}\\,, \\\\\n n_f n_c F_\\text{free-ferm}|_\\text{div.} & =-\\frac{11 n_f n_c}{180\\epsilon} \\,, \\\\\n F_{\\text{curv}} |_\\text{div.} & = \\frac{31(n_c^2-1)}{90 \\epsilon} + \\frac{11n_f n_c}{180 \\epsilon}\\,,\n\\end{split}\\ee\nwhich cancel in the sum. At ${\\cal O}(g^2)$ we have\n\\be\\begin{split}\n-\\frac12 (n_c^2-1) \\log(g_0^2)|_\\text{div.} & = - g^2(n_c^2-1)\\frac{11C_A - 4 n_f T_f}{96\\pi^2 \\epsilon}+\\mathcal{O}(g^4) \\,, \\\\\n-\\frac 12 g_0^2 G_2|_\\text{div.} & = g^2 (n_c^2-1)\\frac{11C_A - 4 n_f T_f}{96\\pi^2 \\epsilon}+\\mathcal{O}(g^4) \\,,\n\\end{split}\\ee\nwhich also cancel in the sum. \nWe therefore obtain, as expected, a finite result for the total free energy at order ${\\cal O}(g^2)$ in any $\\xi$-gauge.\n\n\n\\subsection{Free energy at the fixed point}\n\nWe determine here the final form of the free energy at the fixed point obtained in the $\\epsilon$ expansion, up to ${\\cal O}(\\epsilon)$.\nThe fixed point is obtained by setting to zero the gauge and curvature beta-functions $\\beta_g$, $\\beta_b$ and $\\beta_c$; the curvature beta-functions $\\beta_b$ and $\\beta_c$ were computed in \\cite{Jack:1990eb}.\nAt the fixed point $g^* ,b^*,c^*$ we have\n\\begin{equation}\n F_{\\text{conf}}(\\epsilon)=F(g^*,b^*,c^*,\\mu R) \\,,\n \\label{eq:Ffixed}\n\\end{equation}\nwhich the two-loop computation above determines up to order $\\epsilon$. Note that $F_{\\text{conf}}$ has to be conformally invariant and therefore the dependence on $R$ has to cancel in the final result.
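\n\nFor later reference, the only expansion needed in the ${\\cal O}(g^2)$ check above is\n\\be\n\\log g_0^2=\\log g^2-2\\epsilon\\log\\mu+\\frac{11C_A-4n_f T_f}{48\\pi^2\\,\\epsilon}\\,g^2+{\\cal O}(g^4)\\,,\n\\ee\nwhich follows from eq.~\\eqref{eq:g0Tog}: its pole cancels against that of eq.~\\eqref{eq:g2finalpole}, while the $\\log\\mu$ piece only enters the finite part, pairing with the $\\log R$ terms of the two-loop contributions into $\\log(\\mu R)$ (see below).\n\n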
\nThe expressions for $\\beta_g$, $\\beta_b$ and $\\beta_c$ --up to the order required to get $ F_{\\text{conf}}(\\epsilon)$ to order $\\epsilon$-- are\n\\begin{align}\n\\beta_g & =\\epsilon g-\\Big(\\frac{11}{3}C_A-\\frac{4}{3}T_fn_f\\Big) \\frac{g^3}{(4\\pi)^2}-\\Big(\\frac{34}{3}{C_A}^2-\\frac{20}{3}C_A T_f n_f-4C_f T_f n_f\\Big) \\frac{g^5}{(4 \\pi)^4}+\\mathcal{O}(g^7) \\nonumber \\,, \\\\\n\\beta_b&=-2\\epsilon b-\\frac{62 (n_c^2-1)+11n_f n_c}{360(4\\pi)^2 }-\\frac{(n_c^2-1)}{8}\\left(\\frac{34}{3}{C_A}^2-\\frac{20}{3}C_A T_f n_f-4C_f T_f n_f\\right) \\frac{g^4}{(4\\pi)^6}+\\mathcal{O}(g^6)\\,,\n\\nonumber \\\\\n\\beta_c&=-2\\epsilon c+\\mathcal{O}(g^6)\\,,\n\\end{align}\nfrom which we get\n\\begin{align}\ng_*&=4\\pi\\sqrt{\\frac{3\\epsilon}{11 C_A-4n_fT_f }}\\left(1-\\frac{3 (17 C_A^2 - 10 C_A n_f T_f - 6 C_f n_f T_f)}{(11 C_A - 4 n_f T_f)^2}\\epsilon +\\mathcal{O}(\\epsilon^2)\\right) \\,,\n\\nonumber \\\\\nb_*&=-\\left(\\frac{62 (n_c^2-1)+11n_f n_c}{720(4\\pi)^2 \\epsilon}+\\frac{(n_c^2-1)(17 C_A^2 - 10 C_A n_f T_f - 6 C_f n_f T_f)}{24\\epsilon} \\frac{g_*^4}{(4\\pi)^6}\\right)+\\mathcal{O}(\\epsilon^2)\\,,\n\\nonumber \\\\\nc_*&=\\mathcal{O}(\\epsilon^2)\\,,\n \\label{eqn:11}\n\\end{align}\nwhere\n\\be\nC_f = \\frac{n_c^2-1}{2n_c}\\,.\n\\ee\nNote that, since $\\beta_b$ contains a constant term, $b^*$ starts at order $1/\\epsilon$.\n\nPlugging eq.~\\eqref{eqn:11} in the free energy \\eqref{eqn:31} and using the results for $G_2$ obtained in section \\ref{sec:diagrams}, including the finite pieces computed in the $\\xi=0$ gauge, we obtain\n\\begin{equation} \\boxed{\n\\begin{split}\nF_\\text{conf} &=(n_c^2-1)\\left(F_{\\mathrm{Max}} (d)-\\frac{1}{2}\\log \\Big(\\frac{48 \\pi^2 \\epsilon}{11 C_A - 4 n_f T_f}\\Big)\\right)+n_f n_c F_\\text{free-ferm}(d)+\\log\\left(\\frac{\\mathrm{vol}(SU(n_c))}{(2\\pi)^{n_c^2-1}}\\right)\\\\ &\n+(n_c^2-1)\\left(\\frac{- {n_f} {T_f} \n (1089 {C_f}-913C_A+584 {n_f} {T_f})}{121 (11 {C_A}-4 {n_f} {T_f})^2}-\\frac{386+363 \\big(\\gamma+\\log (4 \\pi ) \\big)}{726}\\right)\\epsilon+{\\cal O}(\\epsilon^2)\\,,\n \\end{split} \\label{eq:fFinal} }\n\\end{equation} \nwhere $F_{\\mathrm{Max}}$ and $F_\\text{free-ferm}$ are given in eqs.~\\eqref{eq:t} and \\eqref{eq:r}, respectively.\nThe volume of the $SU(n)$ group reads (see e.g. \\cite{Marino:2011nm})\n \\be\n \\text{vol}(SU(n))=\\frac{(2\\pi)^\\frac{n(n+1)-2}{2}}{\\prod_{k=1}^{n-1}k!} \\,.\n \\ee\n The cancellation of the $\\log(\\mu R)$ term\\footnote{All the $\\log R$ terms appearing in the loop computations of section \\ref{sec:diagrams} arise from the expansion of an overall $R^{d-4}$ factor present in all the contributions. When moving from $g_0$ to $g$ via eq.~\\eqref{eq:g0Tog} we effectively have $R\\rightarrow \\mu R$. \\label{footnotemuR}} \npresent in the two loop correction \\eqref{eq:g2final} with those arising from the replacement of the bare coupling $b_0$ in eq.~\\eqref{eq:d3} and $g_0$ in the log term in eq.~\\eqref{eqn:31} is a check of the result. Equation \\eqref{eq:fFinal} is the main result of this work. \n \nAs discussed in the introduction, the conjectured generalized $F$-theorem \\eqref{eq:Ftheo} involves the modified free energy \\eqref{eq:FtildeDef}. 
Using the expression for $F_\\text{conf}$ we get\n\\begin{align}\n\\widetilde{F}_\\text{conf} =&(n_c^2-1)\\left(\\widetilde{F}_{\\mathrm{Max}} (d)+\\frac{1}{2}\\sin\\Big(\\frac{\\pi d}{2}\\Big) \n\\log \\Big(\\frac{48 \\pi^2 \\epsilon}{11 C_A - 4 n_f T_f}\\Big)\\right)+n_f n_c \\widetilde{F}_\\text{free-ferm}(d) \\nonumber \\\\\n & -\\frac{1}{2}\\sin\\Big(\\frac{\\pi d}{2}\\Big) \\log\\left(\\frac{\\mathrm{vol}(SU(n_c))}{(2\\pi)^{n_c^2-1}}\\right) \\label{eq:tildef} \\\\\n&+(n_c^2-1)\\left(\\frac{{n_f} {T_f} (1089 {C_f}-913C_A+584 {n_f} {T_f})}{121 (11 {C_A}-4 {n_f} {T_f})^2}+\\frac{386+363 (\\gamma+\\log (4 \\pi )}{726}\\right)\\pi\\epsilon^2 +\\mathcal{O}(\\epsilon^3), \\nonumber \n\\end{align}\nwhere\n\\be\n\\widetilde{F}_{\\mathrm{Max}} = -\\sin\\left(\\frac{\\pi d}{2}\\right)F _{\\mathrm{Max}} \\,, \\qquad \n\\widetilde{F}_{\\mathrm{free-ferm}} = -\\sin\\left(\\frac{\\pi d}{2}\\right)F _{\\mathrm{free-ferm}} \\,.\n\\ee\n\nFor completeness we report its expression in the Veneziano limit, where $n_c,n_f\\rightarrow \\infty$ with $x=n_f/n_c$ fixed. We get \n\\begin{equation}\n\\begin{split}\nF_\\text{conf} &=n_c^2 \\left(F_{\\mathrm{Max}} (d)-\\frac{1}{2}\\log \\Big(\\frac{48 \\pi^2 \\epsilon}{11-2x}\\Big)+x F_\\text{free-ferm}(d)+\\frac{3}{4}-\\frac{1}{2}\\log(2 \\pi )\\right.\\\\ &\\left.-\\left(\\frac{193}{363}-\\frac{737 x-584x^2}{484 (11-2x)^2}+\\frac{1}{2} (\\gamma+\\log(4 \\pi) )\\right)\\epsilon\\right)+\\mathcal{O}(n_c)\\,.\n\\end{split}\n\\end{equation} \nNote that $n_c^2 \\log(n_c)$ terms are induced from both log terms appearing in eq.~\\eqref{eq:fFinal} and they precisely cancel.\nThe same cancellation occurs in the t' Hooft limit. This cancellation is expected from large $n_c$ considerations and the fact that a log term is not expected in the genus expansion. \n\n\\section{Applications}\n\nIn this section we are going to use the conjectured monotonicity of $\\widetilde{F}$ along RG flows \\cite{Giombi:2014xxa} to test some proposed RG flows in $d=3$ and $d=5$, using our result \\eqref{eq:fFinal}. The perturbative expression in eq.~\\eqref{eq:fFinal} is not adequate to extrapolate to physical dimensions with $|\\epsilon| =1/2$. The number of available terms (three) is too limited to attempt a Borel resummation. In the same spirit of ref.~\\cite{Giombi:2015haa}, we will instead look for Pad\\'e approximants for $\\widetilde F$. We also use the knowledge of $\\widetilde F$ for special values of $d$\nto effectively increase by one order the expansion in $\\epsilon$. \n\nNote that $\\widetilde F$ contains a $\\log( \\epsilon)$ term, which, being non-analytic, prevents the application of standard Pad\\'e approximants. Moreover, the free-fermion one-loop determinant is known exactly as a function of $d$ and it is convenient to keep it not expanded in $\\epsilon$. For these reasons, we split the total $\\widetilde F$ in two parts, one that we keep in $d$ dimensions and contains the non-analytic term, and one that is a series in $\\epsilon$. \nFollowing ref.~\\cite{Giombi:2015haa}, we split $\\widetilde{F}_\\text{conf}$ as\n\\be\n \\widetilde{F}_\\text{conf}=n_f n_c \\widetilde{F}_\\text{free-ferm}+\\frac{1}{2}\\sin\\left(\\frac{\\pi d}{2}\\right)(n_c^2-1)\\log\\left(\\frac{2\\epsilon}{11 C_A - 4 n_f T_f}\\right)+\\delta \\widetilde{F}(\\epsilon),\n \\label{eq:Ftildeconf}\n\\ee\nand we use Pad\\'e approximants only on the $\\delta \\widetilde{F}(\\epsilon)$ term. 
The latter includes the free photon contribution, which is evaluated numerically, and reads\n\\begin{align}\n& \\delta \\widetilde{F}(\\epsilon)=(n_c^2-1) \\frac{31\\pi}{90} +\\left((n_c^2-1)4.696- \n\\pi \\log\\left(\\frac{\\text{vol}(SU(n_c))}{(2\\pi)^{n_c^2-1}}\\right)\\right)\\epsilon \\label{eq:deltatildeF} \\\\\n&+(n_c^2-1)\\left(\\frac{n_f\\pi(584 {n_f} {n_c}-1089-737 n_c^2)}{484 n_c (11 n_c-2 {n_f})^2}+\\frac{386\\pi+363\\pi (\\gamma+\\log (4 \\pi )}{726}-10.098\\right)\\epsilon^2 +\\mathcal{O}(\\epsilon^3)\\,.\n \\nonumber\n \\end{align}\n For presentation purposes we rounded to the first 4 digits the ${\\cal O}(\\epsilon)$ and ${\\cal O}(\\epsilon^2)$ contribution coming from the photon free energy, but \n the result is available to higher precision.\nLet us stress the fact that the above splitting is arbitrary and that the corresponding choice significantly affects the final results. This is a signal of the poor knowledge that we have on the series.\nFor the same reason we have not attempted to estimate an error bar in our results. \n \nThe fixed points we get in $d=4+2\\epsilon$ of QCD$_d$ with gauge group $SU(n_c)$ and $n_f$ massless Dirac fermions in the fundamental representation \nare expected to match two known CFTs:\n\n\\begin{itemize}\n\\item For $\\epsilon=-1$ ($d=2$) the IR fixed point of QCD$_d$ with gauge group $SU(n_c)$ and $2n_f$ massless Dirac fermions in the fundamental representation\nis an $SU(2n_f)_{n_c}$ Wess-Zumino-Witten model with an additional decoupled free boson \\cite{Affleck:1985wa, Gepner:1984au}. This CFT has central charge \n\\be c=\\frac{n_c(4n_f^2-1)}{2 n_f+n_c} + 1 \\,,\n\\ee \nand \n\\be\n\\widetilde F_{\\text{WZW}}(d=2) = \\frac{\\pi}{6} c \\,.\n\\ee\nPlugging $d=2$ in eq.~\\eqref{eq:Ftildeconf} and identifying $\\widetilde F_\\text{conf}$ with $\\widetilde F_{\\text{WZW}}$ gives \n\\begin{equation}\n\\delta\\widetilde{F}(\\epsilon=-1)=\\widetilde F_{\\text{WZW}} -n_c n_f \\widetilde{F}_\\text{free-ferm}=-\\frac{\\pi}{3}\\frac{n_f(n_c^2-1)}{2 n_f+n_c}\\,.\n\\label{eq:d2WZW}\n\\end{equation}\n\\item For $\\epsilon=1$ ($d=6$) the theory is conjectured to have a non-unitary UV fixed point described by a Lagrangian with a higher-derivative kinetic term $F^a_{\\mu\\nu}\\nabla^2F_a^{\\mu\\nu}$ \\cite{Gracey:2015xmw,Casarin:2019aqw}, whose anomaly coefficient is $a=-(n_c^2-1)\\frac{55}{84}$ \\cite{Giombi:2015haa}. This leads to \n\\be\\label{eq:d6bc}\n\\delta\\widetilde{F}_{d=6}=\\frac{\\pi}{2}a=-\\frac{55\\pi}{168}(n_c^2-1)\\,.\n\\ee\n\\end{itemize}\n\nTo improve the numerical estimate of our result we constrain the Pad\\'e approximants of $\\delta\\widetilde{F}$ to these known points. In order to avoid misleading results, we exclude approximants with poles in the range between the constraint and $d=4$. \n \n \n \n\\subsection{${F}$-Theorem in $d=3$}\n\nNon-abelian $3d$ gauge theories have received particular attention in the last years due to their possible emergence in quantum phase transitions with deconfined criticality \\cite{Wang:2017txt}\nand as theories governing domain walls among different vacua in non-abelian $4d$ gauge theories \\cite{Gaiotto:2017tne}. 
Theoretically, they are of course also interesting theories by themselves.\n \n \\begin{figure}[t!]\n \\centering \n \\includegraphics[width=0.45\n \\columnwidth]{nf14dfracZoom.pdf} \n \\includegraphics[width=0.535\n \\columnwidth]{nf14dfracV2.pdf} \n\\caption{Left panel: Comparison between $\\Delta \\widetilde{F}$ as a function of the dimension $d$ for small $\\epsilon$ computed by using \nthe result for $\\widetilde F$ in eq.~\\eqref{eq:tildef} (red) or only its free part given by the first two rows of eq.~\\eqref{eq:tildef} (blue). Right panel: Same comparison extended up to $d=3$.}\n\\label{fig:nf14dfrac}\n\\end{figure}\n\nIt is known since the early work \\cite{Appelquist:1989tc} that at large $n_f$ QCD$_3$ flows in the IR to a CFT. For $n_f\\leq n_f^*$, with $n_f^*$ an unknown parameter, a phase with spontaneous symmetry breaking (SB) of the $U(2n_f)$ global symmetry is expected. The only pattern of spontaneous breaking of the global symmetry $U(2n_f)$ compatible with the results of \\cite{Vafa:1983tf,Vafa:1984xh} is\n\\be\nU(2n_f)\\rightarrow U(n_f) \\times U(n_f)\\,.\n\\label{eq:SSBphase}\n\\ee\nMore recently, a qualitative phase diagram of the theory as a function of the number of flavors $n_f$, a fermion mass term, and the level $k$ of a possible Chern-Simons term has been suggested \\cite{Komargodski:2017keh}. We will focus on $k=0$ in the following and use the $F$-theorem to put an upper bound on $n_f^*$.\nA naive way to check if the spontaneous symmetry breaking phase \\eqref{eq:SSBphase} can be realized would be to compare $F_{\\text{IR}}= F_{\\text{SB}}$ as given by $2n_f^2$ Goldstone bosons \n(free in the deep IR), with $F_{\\text{UV}}$ given in the deep UV by $n_c^2-1$ free photons and $n_f n_c$ free fermions. Unfortunately, due to the log term in \\eqref{eq:s}, \n$F_{\\text{UV}}$ diverges and no useful information can be extracted. We overcome this problem by assuming that conformality is lost at $n_f=n_f^*$ by annihilation between the critical QCD$_3$ fixed point with another one, known as QCD$_3^*$ \\cite{Kaplan:2009kr}. A similar analysis for QED$_3$ has been performed in \\cite{Giombi:2015haa}.\nTreating $n_f$ as a continuous parameter, for $n_f= n_f^*+\\eta$ and $0<\\eta \\ll 1$, the theory flows to the IR fixed point QCD$_3$. On the other hand, for $n_f= n_f^*-\\eta$ the theory is expected to undergo a weak first-order phase transition \\cite{Gorbenko:2018ncu} (i.e. a walking regime, see \\cite{Benini:2019dfy} for an explicit realization in $4d$ gauge theories) with a slow RG passing close to the (now complex) fixed points, reaching eventually the spontaneously broken phase \\eqref{eq:SSBphase}. By continuity and the generalized $F$-theorem, we then expect that \n\\be\n\\Delta\\widetilde F(n_f^*)= \\widetilde F_{\\text{conf}}(n_f^*) - \\widetilde F_{\\text{SB}}(n_f^*)>0 \\,.\n\\label{eq:DeltaFDef}\n\\ee\nNote that values of $n_f$ such that $\\Delta\\widetilde F(n_f)<0$ are incompatible with a symmetry breaking phase. On the other hand, values of $n_f$ with $\\Delta\\widetilde F(n_f)>0$ \nare compatible with either a CFT or a symmetry breaking phase. For this reason we can only determine an upper bound $n_f^*\\leq n_f^0$, where $\\Delta\\widetilde F(n_f^0) = 0$.\n\nAn early previous estimate of $n_f^*$ was based on Schwinger-Dyson gap equations \\cite{Appelquist:1989tc} and resulted in $n_f^* \\approx 128(n_c^2-1)/(3\\pi^2 n_c)$.\nMore recently, a lattice analysis \\cite{Karthik:2018nzf} found $n_f^*\\leq 4$ for $n_c=2$. 
\nAn estimate based on the $F$-theorem already appeared in \\cite{Sharon:2018apk}, where as UV theory it was used a SUSY version of QCD$_3$, a genuine CFT with finite $F$ which can flow to QCD$_3$ by appropriate deformations. By comparing $F_{\\text{SUSY}}$ computed by means of supersymmetric localization with $F_{\\text{SB}}$ (and assuming that we can flow from the IR SCQD$_3$ fixed point to the IR QCD$_3$ fixed point), it was found $n_f^*< 13/2$ for $n_c=2$.\n\n \\begin{figure}[t!]\n \\centering \n \\includegraphics[width=0.7\n \\columnwidth]{Su2nf12.pdf} \n\\caption{Values of $\\Delta \\widetilde{F}$ for $SU(n_c)$ as a function of the dimension $d$ computed with Pad\\'e-approximants [2/1] (continuous line) and [1/2] (dashed line) at $n_c=2,3,4,5$. The value of $n_f$ is set to the smallest integer without poles in both approximants in $20$.} \n\\label{fig:su3}\n\\end{figure}\n\nThe value of $\\widetilde F_{\\text{SB}}(n_f)$ is easily computed by noting that the $2n_f^2$ Goldstone bosons associated to the breaking pattern \\eqref{eq:SSBphase}\nbecome free in the deep IR.\nThe contribution to the free energy for a single real scalar reads \\cite{Giombi:2014xxa}\n\\be\n\\begin{split}\nF_\\text{free-sc}& =-\\frac{1}{\\sin(\\frac{\\pi d }{2})\\Gamma(1+d)}\\int_0^1 du\\ u \\sin(\\pi u)\\Gamma\\left(\\frac{d}{2}+u\\right) \\Gamma\\left(\\frac{d}{2}-u\\right)\\,, \\\\\n\\widetilde{F}_{\\mathrm{free-sc}} & = -\\sin\\left(\\frac{\\pi d}{2}\\right)F _{\\mathrm{free-sc}} \\,.\n\\end{split}\n\\ee\nWe then have\n\\be\n\\widetilde F_{\\text{SB}}(n_f) = 2n_f^2 \\widetilde{F}_{\\mathrm{free-sc}}\\,.\n\\ee\nFor $d=3$ it reads\n\\begin{equation}\n\\widetilde{F}_\\text{SB}=2n_f^2 \\left(\\frac{\\log 2}{8}-\\frac{3\\zeta(3)}{16\\pi^2}\\right)\\,.\n\\end{equation}\n %\n\\begin{table}\n\\centering\n\\begin{tabu}{|c|c|c|c|c|c|}\n\\hline$n_f$ & 12 & 13 & 14 & 15 & 16 \\\\\n\\hline \\rowfont{\\color{red}}SB & $18.38$&$21.57$&$ 25.01$&$ 28.71$&$ 32.67$ \\\\\n\\hline ${[2/1]}$ & $12.1$ & $13.1$ & $13.6$ & $13.9$ &$14.16$ \\\\\n\\hline ${[1/2]}$ & $-$ & $13.2$ & $13.9$ & $15.01$&$16.10$ \\\\\n\\hline\n\\end{tabu}\n\\caption{Comparison between the $3d$ values of $\\widetilde F$ in the broken phase $\\widetilde{F}_\\text{SB}$ (red) with those obtained from Pad\\'e-approximants [2/1] and [1/2] \nof $\\widetilde{F}_\\text{conf}$ for QCD$_3$ with $n_c=2$. In all cases $\\Delta \\widetilde{F}< 0$.}\n\\label{tab:1}\n\\end{table}\nBefore presenting the results of our extrapolations to $d=3$, it is useful to see the effect of the 2-loop correction to the free energy with respect to the one-loop\nfree theory contribution in the controlled regime with $|\\epsilon| \\ll 1$. This is shown in fig. \\ref{fig:nf14dfrac} where we plot $\\Delta \\widetilde{F}$ (for $n_c=2$ and $n_f=14$)\ndefined as in eq.~\\eqref{eq:DeltaFDef} as a function of the dimension $d$. We compare the result for $\\widetilde F_{\\text{conf}}$ obtained using eq.~\\eqref{eq:tildef} (red line) \nwith the one obtained using only the first two rows of the same equation (blue line), i.e. only its free part. \nWe note that the effect of the interactions is to favor the SB phase with respect to the conformal one and that the latter is more favored as we lower the space-time dimensions. 
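\n\nAs a simple check of normalizations, for $n_c=2$ and $n_f=14$ the $d=3$ expression above gives\n\\be\n\\widetilde{F}_\\text{SB}=2\\cdot 14^2\\left(\\frac{\\log 2}{8}-\\frac{3\\zeta(3)}{16\\pi^2}\\right)\\simeq 25.01\\,,\n\\ee\nwhich reproduces the corresponding entry of tab.~\\ref{tab:1}.\n\n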
More importantly, we see from the left panel in the figure that when $|\\epsilon| \\approx 0.1$ the one and two-loop results differ significantly and that there is no hope to get \nreliable results from perturbation theory in $d=3$ (for illustration purposes we report in the right panel of fig. \\ref{fig:nf14dfrac} the same plot extended up to $d=3$).\nAs anticipated at the beginning of the section, we then consider Pad\\'e approximants of \\eqref{eq:deltatildeF}. For $d<4$ we augment the approximant by one more term by imposing the constraint \\eqref{eq:d2WZW}.\n\n\\begin{figure}[t!]\n \\centering\n \\includegraphics[width=0.46\n \\columnwidth]{Su2nf0to10.pdf} \\includegraphics[width=0.5\n \\columnwidth]{Su3nf0to20.pdf} \\, \\includegraphics[width=0.46\n \\columnwidth]{Su4nf0to20.pdf} \\includegraphics[width=0.424\n \\columnwidth]{Su8nf0to40.pdf} \\qquad \\quad\\,\\caption{Comparison between the $3d$ value of $\\widetilde{F}_\\text{SB}$ (red line) and of the real part of $\\widetilde{F}_\\text{conf}$ (blue points) as a function of $n_f$ for $n_c=2,3,4,5$\n. The [1/2] approximants provide $\\text{Re}[\\widetilde{F}_\\text{conf}]>\\widetilde{F}_\\text{SB}$ for $n_f\\le8,12,17,22$ suggesting that a chiral symmetry breaking may occur in these ranges of values.}\n\\label{fig:su2nflow}\n\\end{figure}\n\nIn fig.~\\ref{fig:su3} we show the value of $\\Delta\\widetilde{F}$ as a function of the dimension $d$ for $n_c=2,3,4,5$ and $n_f$ equal to the smallest integer without poles in approximants [1/2] and [2/1] satisfying $g^{*2}>0$, i.e. $n_f=13,19,25,31$ respectively.\\footnote{Note that regions in $n_f$ close to $11n_c/2$ are more subject to instabilities as $g^{*2}$ blows up there, producing a pole of order two in the free energy. This is another reason to avoid smaller values of $n_f$ which still satisfy $g^{*2}>0$ (i.e. $n_f=12$ for $n_c=2$). }\nWe see that at $d=3$ $\\Delta\\widetilde{F}<0$ in all these cases, indicating the presence of the conformal phase. As expected, this behavior persists for higher values of $n_f$: \nwe report in tab.~\\ref{tab:1} the comparison between the free energy $\\widetilde F_{\\text{conf}}$ and that of the broken phase for $n_c=2$, $12\\le n_f\\le 16$. Not only the value of $\\widetilde F_\\text{SB}$ remains above $\\widetilde{F}_\\text{conf}$, but also the gap between the two values gets larger and larger. \n\n\\subsubsection{Small $n_f$}\n\nThe one-loop beta-function of the gauge coupling vanishes at $n_f=11 n_c/2$ and changes sign below that, making $g^{*2}_{\\text{one-loop}}<0$. \nOf course, a unitary fixed point in $d=3$ does not necessarily appear as a real {\\it one-loop} fixed point when $\\epsilon \\ll 1$.\\footnote{A notable example of this sort is provided by the abelian Higgs model of $n$ complex scalar fields. It is known that in this theory a real one-loop Wilson-Fisher fixed-point appears for $n> 183$ \\cite{Halperin:1973jh} and this number greatly varies with the order, see e.g. \\cite{Ihrig:2019kfv}. It is in fact likely that the $3d$ abelian Higgs theory has an IR conformal phase for values of $n$ well below 183.} \nAs mentioned, lattice results for $SU(2)$ find that $n_f^*\\leq 4$, suggesting that even if $g^{*2}_{\\text{one-loop}}<0$, there exists a range in $n_f$ where the $3d$ theory is conformal in the IR.\nFor $n_f<11n_c/2 $ we could still use the free energy to extract information on the RG flow. 
For $g^{*2}<0$, the free energy becomes complex, due to the $\\log$ term in eq.~\\eqref{eq:Ftildeconf},\nwith a phase of opposite sign depending on which of the two complex fixed points is chosen: \n\\begin{equation}\n\\log(g^{*2})=\\log(|g^{*2}|)\\pm i \\pi\\,.\n\\end{equation}\nWe propose to estimate the value of $F$ at the strongly coupled real fixed point by an extrapolation of the half-sum of the two complex values obtained with the $\\epsilon$-expansion, i.e. of their real part. The stability of the conformal phase then requires this value to be smaller than $\\widetilde{F}_\\text{SB}$. As a result, our more speculative criterion in the range $n_f<11n_c/2$ is\n\\be\n{\\rm Re}\\, \\Delta\\widetilde F(n_f^*)= {\\rm Re}\\,\\widetilde F_{\\text{conf}}(n_f^*) - \\widetilde F_{\\text{SB}}(n_f^*)>0 \\,.\n\\label{deltaftilde}\n\\ee\nWe report in fig.~\\ref{fig:su2nflow} the real part of $\\widetilde{F}_\\text{conf}$ compared to $\\widetilde{F}_\\text{SB}$ for $n_c=2,3,4,5$ computed with the Pad\\'e approximant [1/2]. We see that in all cases there is a wide range of $n_f$ for which the conformal phase appears to be unstable. We have \n\\begin{align}\n\\begin{split}\nn_f^* & \\lesssim 8 \\,,\\qquad \\qquad \\;\\;SU(2) \\,, \\\\\nn_f^* & \\lesssim12 \\,,\\qquad \\qquad SU(3)\\,, \\\\\nn_f^*& \\lesssim17 \\,,\\qquad \\qquad SU(4)\\,, \\\\\nn_f^* & \\lesssim 22 \\,,\\qquad \\qquad SU(5) \\,.\n\\label{eq:nfbounds}\n\\end{split}\n\\end{align}\nThe upper bound for $SU(2)$ is consistent with the bound $n_f^*< 13/2$ of \\cite{Sharon:2018apk} and with the bound $n_f^*\\leq 4$ of \\cite{Karthik:2018nzf}. A similar analysis can be done in the Veneziano limit, by taking the large $n_c,n_f$ limit of eq.~\\eqref{deltaftilde}. The resulting bound is \n\\be\nx^*\\lesssim4.5\\,.\n\\label{eq:Veneziano}\n\\ee\n\n\\begin{figure}[t!]\n \\centering \n \\includegraphics[width=0.7\n \\columnwidth]{Su2nf0.pdf} \n\\caption{ Values of $\\widetilde{F}_\\text{conf}$ for pure $SU(2)$ YM as a function of the dimension $d$ computed with the Pad\\'e-approximant [2/1] (purple line), compared to the value of the $5d$ supersymmetric fixed point ${E_1}$, the UV completion of $SU(2)$ SYM gauge theory (red point). }\n\\label{fig:su2}\n\\end{figure}\n\n\n\\subsection{${F}$-Theorem in $d=5$}\nIn this section we extrapolate $\\widetilde{F}$ to $5d$ to test a proposed construction of an interacting CFT that provides a UV completion of $5d$ $SU(2)$ YM theory. Ref. \\cite{BenettiGenolini:2019zth} proposed to construct this CFT as the IR fixed point of a supersymmetry-breaking deformation of the interacting superconformal field theory known as $E_1$ theory \\cite{Seiberg:1996bd}. The latter is known to provide the UV completion of $SU(2)$ supersymmetric YM theory (SYM). Ref. \\cite{BenettiGenolini:2019zth} studied the various phases in the two-dimensional space of relevant deformations of the $E_1$ theory, which includes both the supersymmetric deformation to SYM and the non-supersymmetric one, and suggested the existence of a second-order transition between two phases, described respectively by $SU(2)$ YM theory and by a distinct symmetry-protected topological order. The CFT capturing this phase transition would therefore be a UV completion of YM, and provide an example of a non-supersymmetric interacting CFT in $d>4$.
This scenario was further explored in \\cite{Bertolini:2021cew}, which showed that the phase transition should rather be viewed as separating the YM phase from a phase with spontaneous breaking of the instantonic $U(1)$, and in \\cite{Bertolini:2022osy}, where a certain generalization of the theory admitting a large $N$ limit was argued to have a second order transition in that limit.\n\nA possible test for the proposal of ref. \\cite{BenettiGenolini:2019zth, Bertolini:2021cew} relies on the $F$-theorem: the sphere free energy $\\widetilde{F}_{E_1}$ of the SCFT and that of the non-supersymmetric CFT $\\widetilde{F}_{\\text{CFT}}$ should satisfy $\\widetilde{F}_{E_1} > \\widetilde{F}_{\\text{CFT}}$. The quantity $\\widetilde{F}_{E_1}$ has been computed using localization in \\cite{Chang:2017cdx}. It is natural to conjecture that the non-supersymmetric fixed point is the continuation to $d=5$ of the UV fixed point visible in the $\\epsilon$ expansion in $d=4+2\\epsilon$, and therefore to estimate $\\widetilde{F}_{\\text{CFT}}$ by an extrapolation of our result \\eqref{eq:fFinal}. Evidence for the persistence of the $d=4+2\\epsilon$ fixed point up to $d=5$ was obtained in ref.~\\cite{DeCesare:2021pfb} using the five-loop $\\overline{{\\rm MS}}$ $\\beta$-function and Pad\\'e-Borel resummation techniques, both for the pure $SU(2)$ YM theory and for the theory with $n_f$ fundamental Dirac fermions, with $n_f\\leq 4$. Note that the continuation from $d=4+2\\epsilon$ suggests that the critical point should separate a free YM phase from a confined phase (the only phase realized in $d=4$) rather than a second YM phase, similarly to the refined proposal of \\cite{Bertolini:2021cew} and in agreement with a recent lattice study that sees hints of a second order confinement/deconfinement transition \\cite{Florio:2021uoz}.\n\\begin{table}\n\\centering\n\\begin{tabu}{|c|c|c|c|c|c|}\n\\hline$n_f$ & 0 & 1 & 2& 3 &4 \\\\\n\\hline \\rowfont{\\color{red}}$E_{n_f+1}$ & $5.097$ & $6.140$ & $7.395$ & $8.959$ & $11.007$ \\\\\n\\hline [2,1] & $4.8$ & $5.1$ & $5.4$ & $5.7$ &$6.2$ \\\\\n\\hline\n\\end{tabu}\n\\caption{Comparison between the value of $\\widetilde{F}_{E_{n_f+1}}$ (red) and the $[2,1]$ Pad\\'e approximant of $\\widetilde{F}_\\text{conf}$ in $d=5$ (black) as a function of $n_f$ for $0\\le n_f\\le 4$.}\n\\label{tab:3}\n\\end{table}\nWe therefore proceed to extrapolate $\\widetilde{F}_\\text{conf}$ using the only available Pad\\'e approximant that is constrained also by the $d=6$ boundary condition \\eqref{eq:d6bc} and without poles in the interval $4\\leq d \\leq 6$. In fig.~\\ref{fig:su2} we plot the resulting extrapolation of $\\widetilde{F}_\\text{conf}$ as a function of the dimension. The value ranges between a local minimum of $\\sim 2.9$ and a maximum of $\\sim5.0$, before turning negative in the vicinity of $d=6$. The value in $d=5$ is $\\sim4.8$, remarkably close to the known value $\\sim5.1$ of $\\widetilde{F}$ in the $E_1$ theory, and below it, consistently with the proposals of \\cite{BenettiGenolini:2019zth, Bertolini:2021cew}. \n\nThe UV completion of the supersymmetric theory is also known in the presence of $n_f$ fundamental flavors, where it is given by the $E_{n_f+1}$ SCFTs. In tab.~\\ref{tab:3} we compare the corresponding values of $\\widetilde{F}_{E_{n_f+1}}$ with the $[2,1]$ Pad\\'e extrapolation of $\\widetilde{F}_\\text{conf}$ in $d=5$ for $0\\le n_f\\le 4$, finding in all cases $\\widetilde{F}_{E_{n_f+1}}>\\widetilde{F}_\\text{conf}$, consistently with the existence of the RG flow. \n\n\\section{Conclusion}\n\nIn this paper we obtained the NLO result for the free energy on $S^d$ in non-abelian gauge theories in Euclidean $d$ dimensions evaluated at their perturbative fixed point. 
We extrapolated the result to compute the quantity $F$ for the corresponding CFTs in $d=3$ or $d=5$ and used our best estimates together with the monotonicity property of $F$ to test the existence and/or proposed constructions of these CFTs.\n\nWhile successful in many contexts, the $\\epsilon$ expansion is not a rigorous method. Going forward, it would be interesting to assess its reliability in the context of gauge theories. A possible verification could come from comparison with lattice and/or conformal bootstrap results. To that end it would be useful to compute scaling dimensions of operators in addition to those obtained in \\cite{DeCesare:2021pfb}, or to improve the precision of the predictions by computing at higher loop order. \n\nAnother possibility is to apply the $\\epsilon$ expansion to cases in which the existence of a fixed point, and the associated data, are known from other methods such as supersymmetry or holography. For instance, one could apply it to the $4d$ theory with the same matter content as $5d$ $\\mathcal{N}=1$ SU(2) SYM with $n_f$ fundamental flavors, and check if $\\epsilon$ expansion finds a UV fixed point that extrapolates to the $E_{n_f}$ SCFT in $5d$. Note that when continuing the fields to $4d$ one does not land on a supersymmetric theory: the $5d$ vector multiplet contains a real scalar, a $5d$ vector, and a symplectic Majorana fermion, all in the adjoint representation, and their continuation to $4d$ gives rise to a real scalar, a $4d$ vector, and a Dirac fermion, which is not the content of a supersymmetric theory in $4d$.\\footnote{At least, this is the case with the method we are currently using to continue vector fields. One could imagine a different continuation, more in the spirit of DRED \\cite{Siegel:1979wq}, in which the number of components of the vector is kept fixed. In this putative approach, $3d$ gauge theories with matter would not be obtainable with $\\epsilon$ expansion, because the vector in $4d$ would give rise to additional scalars coupled to matter fields (and gauge fields as well, in the non-abelian case).} \nAs a result, supersymmetry is expected to emerge only in the limit $d\\to5$. To check the existence of fixed points in $d=4+2\\epsilon$ one then needs the coupled system of $\\beta$ functions for the gauge coupling in the presence of both fermionic and bosonic adjoint matter, and of the Yukawa coupling, see e.g. the Lagrangian (15) in \\cite{Mirabelli:1997aj}. Note that these $\\beta$ functions are known at lower loop order compared to the case with only fermionic matter that was used in \\cite{DeCesare:2021pfb}, see \\cite{Davies:2021mnc, Bednyakov:2021qxa}. \nWe leave this as direction for future studies.\n\nThe perturbative expansion of the free energy is insensitive to the global structure of the gauge group, except the log term in eq.~\\eqref{eq:fFinal} where the volume of the gauge group appears. \nIt would be interesting to compare our results for $F$ with those computed using localization (or some other method) in SCFTs based on gauge theories such as $PSU(n_c) = SU(n_c)/{\\bf Z}_{n_c}$. 
\nThis analysis might be useful to shed some light on the nature of the transition delimited by our fixed points for $d>4$, since a confinement/deconfinement transition has the one-form symmetry ${\\bf Z}_{n_c}^{(1)}$ as order parameter, while the latter is gauged in $PSU(n_c)$ theories and replaced by an emergent magnetic symmetry.\n\n\\section*{Acknowledgments}\n\nWe thank Francesco Benini, Matteo Bertolini, Simone Giombi, Francesco Mignosa, Jesse van Muiden and Yifan Wang for useful discussions. Work partially supported by INFN Iniziativa Specifica ST\\&FI. LD also acknowledges support by the program ``Rita Levi Montalcini'' for young researchers. \n\n\n\n"} +{"id": "red-arxiv-9", "source_id": "red-arxiv_9_red-arxiv-9", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction}\\label{sec:Introduction}\n\n\n\n\n\n\\IEEEPARstart{R}{adio maps} are representations of quantities of interest for wireless communications applications, in a fine spatial grid. One traditional quantity of interest is the so-called pathloss.\n\nThe \\emph{pathloss} (or \\emph{large-scale fading coefficient}), quantifies the loss of wireless signal strength between a transmitter (Tx) and receiver (Rx) due to large scale effects. The signal strength attenuation can be caused by many factors, such as free-space propagation loss, penetration, reflection and diffraction losses by obstacles like buildings and cars in the environment. In dB scale pathloss amounts to $\\textup{P}_{\\textup{L}} = (\\textup{P}_{\\textup{Rx}})_{\\rm dB}-(\\textup{P}_{\\textup{Tx}})_{\\rm dB}$, where $\\textup{P}_{\\textup{Tx}}$ and $\\textup{P}_{\\textup{Rx}}$ denote the transmitted and received locally averaged (over multipath, small-scale phenomena) power (also called Received Signal Strength - RSS) at the Tx and Rx locations, respectively. Throughout the paper, we also use the term \\emph{pathgain}, when we refer to the reciprocal of pathloss (i.e., the pathloss multiplied by -1 in dB scale).\n \nMany applications in wireless communication explicitly rely on the knowledge of the pathloss function, such as device-to-device (D2D) link scheduling, or user-cell site association. For example, in the latter, the goal is to assign a set of wireless devices to a set of cellular base stations, and in order to decide which device to assign to which station, it is important to know the radio map.\n \n Some other use cases of pathloss radio maps are fingerprinting-based localization methods where the fingerprint is the signal strength (RSS) from different ``anchor'' infrastructure nodes (e.g., base stations or access points) \\cite{ICASSP,LocUNetArXiV}, \n physical-layer security, power control in multi-cell massive MIMO systems, user pairing in MIMO-NOMA systems, precoding in multi-cell large scale antenna systems, path planning, and activity detection (See e.g. the references in \\cite{RadioUNetTWC}). \n\n\nIn the following, we present datasets of simulated radio maps based not only on pathloss but also on time of arrival (ToA), at each point in a fine spatial grid, in large dataset of city maps.\nTo the best of our knowledge, there is currently no such publicly-available dataset.\nThe fact that we extract the signal strength and ToA from the same simulation in each of the environments is a key-point, which enables using our dataset for fair comparisons of wireless communication methods that are based on the RSS and those that are based on ToA. 
We hence believe that our dataset will be useful for the research community in this area.\n\n\nThe high accuracy of the software we used for our simulations (the ray-tracing software WinProp from Altair \\cite{WinPropFEKO}) was demonstrated by field measurements in many cities such as Helsinki, Munich, Nancy, Stuttgart, and Hong Kong, (see \\cite{IRTMANET,DPM} and the references therein). Moreover, such simulation methods are frequently used by e.g. cell operators, proving their efficiency.\n\nUsing such computer simulations allowed us to generate high volumes of data, through which many studies are made possible. Moreover, applications like wireless localization, which has been an important motivation for our endeavours, benefit from the high resolution of the radio maps, as the grid size implies a minimum error in the accuracy of localization. Our presented datasets with their 1m resolution are suitable for studies of high accuracy localization methods, whereas constructing such radio maps through measurement campaigns would be extremely difficult.\n\n\n\n\n\n\n\nIn the following, we explain the fundamentals on which all the presented datasets are based, followed by their individual descriptions and use cases.\n\\section{General Setting}\\label{sec:Common}\n\n\n\nWe employ a collection of propagation models, with various complexities, given by \\emph{WinProp}\\cite{WinPropFEKO}, on a dataset of urban environments. \nWe consider 701 city maps of size $256 \\times 256$ square meters, which were fetched from \\emph{OpenStreetMap} \\cite{OpenStreetMap} from the cities Ankara, Berlin, Glasgow, Ljubljana, London, and Tel Aviv.\nIn each map, 80 transmitter locations are considered, amounting to a total of 56080 simulations for each propagation model. We divide the simulations to two types: \\emph{ground level}, where the 80 Tx are located at ground level (1.5m) for each map, and \\emph{rooftop}, where the 80 Tx are located on the rooftops of the buildings. The former simulations correspond to the D2D setting, where the simulation is essentially 2D, and the latter simulations correspond to the base-station setting, where 3D considerations are essential for the simulation. \n\n\n\n\n\nWe used two types of radio map simulation methods -- \\emph{Dominant Path Model (DPM)} \\cite{DPM} and \\emph{Intelligent Ray Tracing (IRT)} \\cite{IRT}. In IRT simulations, the maximum number of interactions of the rays with the environment is a parameter that effects the complexity/accuracy of the simulations. We used IRT with 2 and 4 interactions, which we call \\emph{IRT2} and \\emph{IRT4}, respectively. All buildings (and cars) are assumed to have the same generic material property, for simplicity.\n\nAnother parameter of the IRT simulations that affects the fineness of the reflection patterns is the length of the segments/tiles of the objects (buildings and cars in our datasets).\n\n\nAll simulations were saved with a resolution of 1m per pixel, as \\texttt{.png} images. Furthermore, the data describing the simulation settings, i.e., the transmitter locations, city maps, and cars (when applicable) are provided as images, together with their corresponding coordinates/shapes (as polygons) in \\texttt{.json} files. The roads are saved as images and as polygonal lines. 
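For completeness, a minimal loading sketch is given below. The directory layout and file names used here are hypothetical placeholders (the actual contents and naming are documented at \\cite{DataPort}); only the 1m-per-pixel \\texttt{.png} radio maps and the \\texttt{.json} polygon format are taken from the description above.
\\begin{verbatim}
import json
import numpy as np
from PIL import Image

# Hypothetical paths -- adapt to the actual dataset layout.
radio_map_png = "gain/DPM/map042_tx07.png"   # placeholder file name
buildings_json = "polygons/map042.json"      # placeholder file name

# Gray levels 0..255 encode the scaled pathloss; 1 pixel corresponds to 1 meter.
gray = np.asarray(Image.open(radio_map_png).convert("L"), dtype=np.float64)
scaled_pathgain = gray / 255.0               # 1.0 = strongest signal, 0.0 = below truncation

with open(buildings_json) as f:
    polygons = json.load(f)                  # building footprints as polygon vertex lists

print(scaled_pathgain.shape)                 # (256, 256) for these datasets
\\end{verbatim}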
A detailed list of the contents of the presented datasets can be found at \\cite{DataPort}.\n\n\n \n The pathloss values in the dataset were truncated below a minimum pathloss value, and the range between this minimum pathloss value and the maximum pathloss value over the whole dataset was scaled to gray levels between 0 and 255, to save the pathloss radio map simulations as images. \nNotice the importance of putting emphasis on high pathgains, since the applications that use the pathloss values generally would favor communication links with higher pathgains. Hence, high pathgain values should be represented by gray levels close to 255 (maximum).\n\\begin{figure}[!t]\n \\centering\n \n \\includegraphics[width=.45\\textwidth]{allTogether.png}\n \\caption{A partial visual summary of the presented datasets and their applications. Shown are: City maps w/o and w/ height encoding, roads and cars, pathloss radio map prediction and simulation under different settings, and the result of a localization experiment with LocUNet \\cite{LocUNetArXiV}.}\n \\label{fig:allTogether}\n \n\\end{figure}\nAlso, note that one should only consider the points where the received signal power (RSS) $(\\textup{P}_{\\textup{Rx}})_{\\rm dB} = \\textup{P}_{\\textup{L}}+ (\\textup{P}_{\\textup{Tx}})_{\\rm dB}$ lies above the noise floor $(\\mathcal{N})_{\\rm dB}$, such that the transmitted signal can be reliably detected. In other words, pathloss values which lead to received signal powers below the noise floor, should be omitted. In the Appendix \\ref{sec:AppTrunc}, we briefly showcase how we found our truncation values (which we called \\emph{pathloss threshold} and \\emph{analytic pathloss threshold}), which might be beneficial for users of the dataset.\n The parameters and the found pathloss thresholds are summarized in Table \\ref{table:parameters}.\n\n\n\\begin{table}[!t]\t\n\t\\renewcommand{\\arraystretch}{1}\n\t\\centering\n \\caption{Parameters of the datasets}\n\t\\scalebox{0.8}{\n\t\\begin{tabular}{cc}\n\t\n\t\t\\rowcolor{Gray}\t\t\\bfseries Parameter & \\bfseries Value\\\\\n\t\n \\rowcolor{Grayy}\\multicolumn{2}{c}{\\bfseries \\quad \\quad Common Parameters in the 2D and 3D Datasets}\\\\\n\t Map size & $256^2$ pixels\\\\\n\t\n\t\tPixel length & 1 meter\\\\\n\t\n\t\t Rx height & 1.5 meters\\\\\n\t\n\t\t\n\t\n\t\n\t\tNoise power spectral density & -174 dBm/Hz\\\\\n\t\n\t\tTransmit power & 23 dBm\\\\\n Antenna type & Isotropic\\\\\n\t\n \\rowcolor{Grayy}\\multicolumn{2}{c}{\\bfseries \\quad \\quad Parameters of the 2D Datasets}\\\\\n Building height & 25 meters\\\\\n Tx height & 1.5 meters\\\\\nCenter carrier frequency & 5.9 GHz\\\\\n\t\n\t\tChannel bandwidth & 10 MHz\\\\\n Noise figure & 0 dB\\\\\n\t\n Min PL in the simulations & -186 dB\\\\\n Max PL in the simulations/dataset & -47 dB\\\\\n\t\tPL threshold & -127 dB\\\\\n PL range & 80 dB\\\\\n PL threshold (analytic, dataset) & -147 dB\\\\\n Simulation type & DPM / IRT\\\\\n Max no. 
of interactions in IRT & 2 / 4\\\\\n \n\t\t\n Tile length in IRT simulations & 100 / 10 meters\\\\\n Simulations with cars in the map & No / Yes\\\\\n \\rowcolor{Grayy}\\multicolumn{2}{c}{\\bfseries \\quad \\quad Parameters of the 3D Dataset}\\\\\n Height range of the buildings & 6.6-19.8 meters\\\\\n Tx height & 3 m above the rooftop \\\\\n Center carrier frequency & 3.5 GHz\\\\\n Channel bandwidth & 20 MHz\\\\\n Noise figure & 20 dB\\\\\n Min PL in the simulations & -162 dB\\\\\n Max PL in the simulations/dataset & -75 dB\\\\\n \n\t\tPL threshold & -104 dB\\\\\n PL range & 29 dB\\\\\n PL threshold (analytic, dataset) & -111 dB\\\\\n \n Simulation type & IRT\\\\\n Max no. of interactions in IRT & 2\\\\\n Tile length in IRT simulations & 10 meters\\\\\n Simulations with cars in the map & No \n\t\\end{tabular}\n\t}\n\t \\label{table:parameters}\n\\end{table}\n\n\n\\section{2D Datasets}\\label{sec:2D}\n\nIn this section, we present datasets in the 2D setting, i.e., Tx deployed in the street level, 1.5m above ground, having the same height as the $256\\times 256$ receiver pixels. \nWe first introduce the \\emph{RadioMapSeer Dataset}, the first pathloss radio map dataset that we generated and used in our RadioUNet work \\cite{RadioUNetTWC}. \nThen, we present two additional IRT datasets, which were generated to provide radio maps with more fine-grained reflection patterns, in the same city maps and Tx locations as in \\emph{RadioMapSeer}. \nLast, we present two datasets (pathloss and ToA radio maps) designed specifically for wireless localization application. \n\nFor consistency and fair comparisons, in all 2D datasets, the same city and car maps, and the same Tx locations were used. Tx were restricted to be positioned within the $150 \\times 150$ area in the center of the $256 \\times 256$ city map and to be separated by at least 20m from each other.\n\n\\subsection{2D Radio Map Versions with Cars}\n\nFor each simulation models (i.e., DPM and IRT under various complexities) in 2D scenarios, we provide a pathloss radio map dataset version with the presence of cars in the environment. The specifications of these simulations are summarized as follows.\n\n\\begin{itemize}\n \\item For each of the 701 city map, 100 cars of size $2 \\times 5 \\times 1.5$ (width $\\times$ length $\\times$ height) are added (if the roads of the corresponding maps are of sufficient length to fit them, otherwise the maximum number that can fit), with adopting a random generation procedure near and along or perpendicular to roads. Specifically, we uniformly randomly picked road segments and randomly placed \ncars either parallel or perpendicular to the chosen segments. We note that length of the segments are\ndifferent and as a result shorter segments have more cars per segment length in general. This\nprovides in fact a more realistic placement of the cars, since shorter segments are mostly due\nto curves in roads and more traffic congestion in curvy roads is expected.\n \\item We calculated the effects of cars by running DPM simulations with the presence of cars, subtracting such results from DPM simulation results without cars, and by scaling this difference by $0.5$. 
We arrived at this modeling decision as a result of a discussion with an expert.\n \\item The found effect of cars are then added to the radio maps simulated by the mentioned methods to generate their versions with cars.\n\\end{itemize}\n\n\\vspace{-1.5mm}\n\n\\subsection{The RadioMapSeer Dataset}\n In this dataset, the simulation parameters were chosen as follows.\n\n\\begin{itemize}\n \\item DPM in the generic simulation setting, run for the 701 city maps with 80 different Tx locations (a total of 56080 simulations).\n \\item IRT with 2 (either a diffraction or reflection) and 4 maximum interactions of rays , where the tiling size was set to 100m. We consider 80 and 2 Tx per map for IRT2 and IRT4 respectively, i.e., a total of 56080/1402 IRT2/IRT4 simulations.\n\\end{itemize}\n\n\n\nTo represent scenarios where the city map is inaccurate, additional versions of the considered 701 city maps are provided, where in each map of the original city map, one to four randomly chosen buildings are removed. Six experiments of random removals for each city are provided.\n \nThe main application of \\emph{RadioMapSeer Dataset} studied in detail in our RadioUNet \\cite{RadioUNetTWC} was RSS radio map estimation, e.g. achieving high accuracy radio maps predictions, but in much shorter time than a ray-tracing software. \nThe presented method can take two versions of inputs. In the first case, only the city map and the Tx location image are given as inputs, similar to a simulation setting of a ray-tracing software. In the second version, samples from the ground truth radio map are used as additional input features. This version allows one to adapt to the real propagation phenomena, when e.g. the available simulation method (used for the initial supervision) or the environment map is not accurate enough. Note that, in both cases, RadioUNet can yield very accurate results, in previously unseen city environment and Tx deployment scenarios.\n\nOne interesting observation is worth mentioning here: When studying the scenarios with missing buildings in the city maps, RadioUNet demonstrated an ability to infill their pixels, when RSS values from the radio map were sparsely sampled to compensate for the mismatched knowledge about such buildings. Here, the detection of the missing buildings itself can be of interest, exemplifying an unexplored potential use case of the presented dataset. Further details and other example applications of this dataset can be found in \\cite{RadioUNetTWC}.\n\n\n\n\n\n\n\\subsection{Additional Higher Accuracy 2D Pathloss Radio Map Datasets}\n\t\nTwo more pathloss radio map datasets with finer reflection patterns were later generated, which were obtained with IRT simulations with more precision and higher computational complexity. Such finer patterns with sharp pathloss transitions can be of interest for some sensitive applications.\nIn these pathloss radio map datasets, the tile length of the elements in the map (buildings, and cars if applicable) is reduced from 100m (which resulted in having only one tile for the most of the building walls) to 10m, promoting the finer reflection phenomena in the simulations. 
\n\n The first dataset is based on IRT2 simulations, with multiple interactions, i.e., a total of 2 maximum interactions for rays (recall the IRT2 setting in RadioMapSeer allowed for either a reflection or diffraction), whereas the second dataset was obtained with IRT4 simulations.\n\n\n\n\n\\subsection{Datasets Specialized for Localization}\n\n\n\nWe have also generated two new datasets to allow for comparisons among RSS (pathloss) and ToA ranging-based localization algorithms in realistic dense urban settings, which we called \\emph{RadioLocSeer Dataset} and \\emph{RadioToASeer Dataset}. \n\n\n\nIn dense urban areas, localization methods that rely on distance (range) estimations (between Tx and the Rx to be located) experience drastic performance deterioration, because of the unavoidable errors in the range estimations in an urban environment. Such errors occur due to the interactions of the rays with the obstacles present in the environment, such as the buildings, cars, pedestrians, incurring signal strength loses and delays, which results in very inaccurate distance estimations. Hence, radio map (also called \\emph{fingerprint})-based methods are preferred over the methods which explicitly make use of such (mismatched) distance estimations. \n\nThe presented \\emph{RadioLocSeer Dataset} was designed to study RSS (pathloss) radio map-based localization methods. \n\n\n\\subsubsection{RadioLocSeer Dataset}\\label{subseq:Dataset_RadioLocSeer}\n\nAn important motivation to generate this dataset was that localization methods do not have access to the ground truth radio maps, and instead make use of fast pathloss radio map estimation methods, such as the deep learning method \\emph{RadioUNet} \\cite{RadioUNetTWC} (Please see \\cite{ICASSP,LocUNetArXiV} for details). Hence, in the presented localization datasets we provide estimated radio maps generated by RadioUNet. Here, RadioUnet was supervised on ground truth DPM radio maps over 602 city maps, and then evaluated on 99 test-set city maps. For each ``test-set'' city map and Tx location we provide simulated pathloss radio maps under all the different simulations we explained for the 2D setting. This dataset contains a total of 7920 such radio map estimations per simulation setting, in 99 city maps and 80 Tx locations per map. Being specialized for the localization task, the pathloss radio maps in this dataset are provided as truncated at the found \\emph{pathloss threshold}, i.e., at -127 dB (cf. Table \\ref{table:parameters}), instead of the \\emph{analytic pathloss threshold} (cf. Appendix \\ref{sec:AppTrunc}). DPM radio map estimations by RadioUNet \\cite{RadioUNetTWC} in this subset (which was unseen during its training) of maps (amounting to a total of $99 \\times 80 = 7920$ estimations) are also included in the dataset. \n\n \t\\begin{table}[!t]\n\t\\renewcommand{\\arraystretch}{1}\n\t\\centering\n \\caption{\\small Comparison of the performance of the pathloss radio map-based LocUNet \\cite{ICASSP,LocUNetArXiV} with ToA ranging-based methods. Mean absolute error accuracies of the compared algorithms under different additional noise and number of Tx in the map.}\n \\scalebox{0.7}{\n\t\\begin{tabular}{c|c|c|c|c}\n\t\\hline\n\t\t\\rowcolor{Gray} {\\cellcolor{Grayyy} \\bfseries no Tx:}&$\\mathbf{5}$&$\\mathbf{3}$&$\\mathbf{5}$&$\\mathbf{3}$\\\\\n\t\t\\hline\t\n\t\t\\rowcolor{Gray} {\\cellcolor{Grayyy} \\bfseries Standard dev. 
of noise:}&\\multicolumn{2}{c|}{ $\\sigma = 0$} & \\multicolumn{2}{c}{$\\sigma = 20$}\\\\\n\t\t\\hline\n\t\tPOCS \\cite{POCSgholami2011wireless} & $37.75$ & $46.15$ & $41.27$ & $48.72$ \\\\\n\t\t\\hline\t\t\n\t\tSDP \\cite{SDP}& $\\mathbf{6.81}$ & $\\mathbf{13.95}$ & $24.88$ & $41.02$ \\\\\n\t\t\\hline\n\t\tRobust SDP \\cite{SDPR}, $b=20$ & $9.98$ & $17.10$ & $27.76$ & $40.37$ \\\\\n\t\t\\hline\n\t\tRobust SDP \\cite{SDPR}, $b=0.7$ & $7.04$ & $15.38$ & $28.42$ & $41.74$ \\\\\n\t\n\t\n\t\n\t\n\t\t\\hline\n\t\tBisection-based robust method \\cite{BisecRob}, $b=20$ & $9.16$ & $15.87$ & $\\mathbf{23.30}$ & $\\mathbf{38.14}$\\\\\n\t\t\\hline\n\t\tBisection-based robust method \\cite{BisecRob}, $b=0.7$ & $9.49$ & $14.95$ & $24.09$ & $40.75$ \\\\\n\t\t\\hline\\hline\n\t\tLocUNet \\cite{ICASSP,LocUNetArXiV} Nominal $\\&$ Robustness & $\\mathbf{4.80}$&$\\mathbf{10.70}$ & $\\mathbf{13.14}$& $\\mathbf{19.06}$ \\\\\n\t\\end{tabular}\n\t}\n\t \\label{table:ToAAll}\n\\end{table}\n\n\n\n\t\\subsubsection{RadioToASeer Dataset}\\label{subseq:Dataset_RadioToASeer}\nFor the ranging based methods, ToA information is considered to be more preferable over RSS based-ranging, under the assumption of having access to hardware and protocols that allow for precise ToA measurements. This dataset allows studying such methods in the urban setting. \n\nThis dataset was generated based on DPM simulations, of the same settings as in \\emph{RadioLocSeer}, which provides ToA information of the dominant paths, to allow for fair and consistent comparisons between RSS and ToA ranging-based methods in realistic urban scenarios. \n\n\n In the Appendix \\ref{sec:AppToAOpt}, we explain how the ToA value of a dominant ray path (evaluated from the DPM simulation) constitutes a (quasi) lower bound on the true ToA. This ultimately means that evaluating the accuracy of ToA localization methods on \\emph{RadioToASeer} gives optimistic (upper bound) estimates of the actual accuracy of an ToA ranging-based localization method. Hence, showing that an RSS localization method outperforms a ToA ranging-based localization method on our dataset is a strong evidence that the RSS outperforms the ToA method in dense urban settings, but the converse is not true. \n\n Examples of the usage of these datasets can be found in \\cite{ICASSP,LocUNetArXiV}, where state-of-the-art RSS and ToA-ranging based methods were compared, to the best of our knowledge, for the first time in the literature. Moreover, in these works we presented a RSS radio map and deep learning-based method called \\emph{LocUNet}, which essentially uses the pathloss radio map estimations (e.g. from RadioUNet for fast results) and the RSS measurements of the Rx from the Tx. We report in Table \\ref{table:ToAAll} the numerical comparisons between the LocUNet and the state-of-the-art ToA ranging-based methods (cf. \\cite{ICASSP,LocUNetArXiV} for the details), demonstrating LocUNet's superior performance over the ToA ranging-based methods in various settings. \n\n\n\n \\setlength{\\medskipamount}{5.2pt plus 0.1pt minus 0.1pt\n\n\n\n\n\n\n\n\n\\begin{figure}[!t]\n \\centering\n \n \\includegraphics[width=.48\\textwidth]{sims13DScene.png}\n \\caption{A simulated map from the presented 3D pathloss radio map dataset. The rays arriving at a chosen pixel are shown. 
Tx mounted on the rooftop of a high building}\n \\label{fig:3D_Rays}\n \n\\end{figure}\n\n\\section{3D Dataset}\\label{sec:3D}\n\nLast, we extend our simulations to the 3D setting, where varying building heights and Tx deployment on rooftops are considered. This dataset could find use in studying e.g. cellular scenarios, where the transmitters are placed on the rooftop. Notice that over the rooftop propagation gives rise to richer and more complicated pathloss patterns than in the 2D case (see Fig. \\ref{fig:3D_Rays} for a simulated radio map example, which is also contained in our dataset after the appropriate post-processing). Thus, to study or design deep learning methods which perform well in the 3D setting, 3D radio map datasets are required. The presented 3D dataset has the following specifications:\n\n\\begin{itemize}\n\\item The simulations are conducted in the setting of IRT with multiple ray interactions (max. 2), with 10m tiling length of the building elements.\n\\item The same (as in the previous 2D cases) 701 $256 \\times 256$ city maps are used. Each building in a city map is assigned a height that lies between 2 to 6 stories, where a story is taken as 3.3m. This range of 13.2m (from the minimum of 6.6 meters to the maximum of 19.8m) is divided into 255 equal length levels and building heights are found by picking one of these levels uniformly. This data is provided in two image sets, one as black and white (BW) images of the pixels occupied by buildings, and one with their encoded height as gray levels. As in the previous datasets, the corresponding polygons (2.5D) in \\texttt{.json} format are provided. \n \\item Transmitters are generated on the buildings that have a height of at least 5 stories (16.5m). The transmitters are placed close to the edges to reflect the realistic deployment. The transmitter height from the rooftop is set to 3m. We have restricted the Tx to be positioned within the $150 \\times 150$ area in the center of the $256 \\times 256$ city map if possible, and considered a larger area of $230 \\times 230$ for the city maps when this was not possible, due to lacking buildings (above which the Tx could be deployed) in the center.\n\n\\end{itemize}\n\n\n\n\\begin{figure}[!t]\n \\centering\n \n \\includegraphics[width=.48\\textwidth]{3DTogether2.png}\n \\caption{An example application of the 3D dataset. Shown are the height-encoded city map and the Tx (red plus sign), 3D radio map from the dataset, and the radio map predictions by the naive and the 3D adapted RadioUNet \\cite{RadioUNetTWC} variants.}\n \\label{fig:3DTogether}\n \n\\end{figure}\n\nIn Fig. \\ref{fig:3DTogether}, we show an example use of the presented dataset, where we trained RadioUNet with two versions of input: In the first one (naive), only the black-and-white 2D images of the buildings (city map) and Tx were used as input features, whereas in the second case, the height information of the buildings were used as additional input features, through an appropriate usage of the provided height-encoded city images. In particular, we have decomposed the 3D building maps into equal length 2D horizontal \\emph{slices}, such that each slice corresponds to an interval $\\left[\\textup{H}_{\\textup{slice,min}}, \\textup{H}_{\\textup{slice,max}}\\right]$ in the vertical direction, and the height of the buildings are re-scaled in this interval, such that heights below and above this range acquire 0 and 1, respectively, while the intermediate values lie within $\\left(0, 1\\right)$. 
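This slicing step admits a compact implementation; the following is a small sketch of one possible realization (our own illustration, with a hypothetical \\texttt{height\\_map} array in meters; the 6.6--19.8m range is the one quoted above, and the number of slices is left as a parameter).
\\begin{verbatim}
import numpy as np

def height_slices(height_map, n_slices=12, h_min=6.6, h_max=19.8):
    """Decompose a 2D building-height map (meters) into n_slices 2D maps.

    Heights below a slice interval map to 0, heights above map to 1,
    and heights inside the interval map linearly to (0, 1).
    """
    edges = np.linspace(h_min, h_max, n_slices + 1)
    slices = [np.clip((height_map - lo) / (hi - lo), 0.0, 1.0)
              for lo, hi in zip(edges[:-1], edges[1:])]
    return np.stack(slices)                   # shape: (n_slices, H, W)

# Toy example with a random 256 x 256 height map:
rng = np.random.default_rng(0)
toy_heights = rng.uniform(6.6, 19.8, size=(256, 256))
print(height_slices(toy_heights).shape)       # (12, 256, 256)
\\end{verbatim}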
We observed that 12 equal length slices together with the BW (2D) city map and the Grid Anchor maps introduced in \\cite{transRadio} yielded a good accuracy with RMSE 0.87 dB (cf. Fig. \\ref{fig:3DTogether}, right), outperforming the naive approach, which had resulted in an RMSE of 1.26 dB.\n\nThe example in Fig. \\ref{fig:3DTogether} demonstrates the capability of the 3D-adapted method to learn the effect of different building heights, on the pathloss radio maps, as witnessed by the different shadow lengths appearing behind the buildings with different heights, complying with the simulations by the ray-tracing software and real propagation phenomena. However, with the naive approach, such differences are lost.\n\n\n\n\\section*{Acknowledgments}\nWe thank Ibrahim Rashdan for a fruitful discussion on the impact of cars on the pathloss function. The work presented in this paper was partially funded by the DFG Grant DFG SPP 1798 “Compressed Sensing in Information Processing” through Project Massive MIMO-II, and by the German Ministry for Education and Research as BIFOLD (ref. 01IS18037A). \n\n\\begin{appendices}\n \n\n\\label{appendix}\n\n\n\n\t\n\n\n\n\n\n\n\n\n\t\n\n\\section{Determining the Radio Map Truncation Values}\\label{sec:AppTrunc}\nIn the following, we provide a demonstration of how we found the pathloss truncation values (cf. Sec. III-B in \\cite{RadioUNetTWC} for more details).\nFirst note that one should consider the points where the received signal power $(\\textup{P}_{\\textup{Rx}})_{\\rm dB} = \\textup{P}_{\\textup{L}}+ (\\textup{P}_{\\textup{Tx}})_{\\rm dB}$ lies above the noise floor $(\\mathcal{N})_{\\rm dB} $, i.e. the points where $(\\textup{P}_{\\textup{Rx}})_{\\rm dB} \\geq (\\mathcal{N})_{\\rm dB}$ holds, where $(\\mathcal{N})_{\\rm dB} = 10\\log_{10} W N_0 + \\textup{NF}$ is the noise floor in dB, with NF being the noise figure. Solving this for pathloss $\\textup{P}_{\\textup{L}}$ we get the \\emph{pathloss threshold} $\\textup{P}_{\\textup{L,thr}}$ as\n $\\textup{P}_{\\textup{L}}\\geq \\textup{P}_{\\textup{L,thr}} = -(\\textup{P}_{\\textup{Tx}})_{\\rm dB} + (\\mathcal{N})_{\\rm dB}.$ Even though any signal below the noise floor cannot be detected in reality, some applications might benefit from having pathloss simulation values that lie below the pathloss threshold (e.g. the coverage classification application presented in \\cite{RadioUNetTWC}). Hence, the radio maps in the presented dataset are truncated at a lower threshold $\\textup{P}_{\\textup{L,trnc}} < \\textup{P}_{\\textup{L,thr}}$, where we chose $\\textup{P}_{\\textup{L,trnc}}$ such that the difference between the maximum pathloss $M_1$ in the dataset and $\\textup{P}_{\\textup{L,thr}}$ is approximately four times greater than the difference between $\\textup{P}_{\\textup{L,thr}}$ and $\\textup{P}_{\\textup{L,trnc}}$, i.e., $M_1-\\textup{P}_{\\textup{L,thr}} = 4(\\textup{P}_{\\textup{L,thr}}-\\textup{P}_{\\textup{L,trnc}})$. We dub $\\textup{P}_{\\textup{L,trnc}}$ the \\emph{analytic pathloss threshold}. \n Considering all the above mentioned points, the pathloss values in the radio maps were calculated by $f=\\max\\{\\frac{\\textup{P}_{\\textup{L}}-\\textup{P}_{\\textup{L,trnc}}}{M_1-\\textup{P}_{\\textup{L,trnc}}},0\\}$, with $M_1$ denoting the maximal pathloss in all simulated radio maps. Hence, $f=0$ represents anything below the analytic pathloss threshold, and $f=1$ represents the maximal pathgain at the transmitter. 
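The mapping from simulated pathloss values to stored gray levels can be summarized by the short sketch below (our illustration of the formula above; the default values $M_1=-47$ dB and $\\textup{P}_{\\textup{L,trnc}}=-147$ dB are the 2D-dataset entries of Table \\ref{table:parameters}).
\\begin{verbatim}
import numpy as np

def pathloss_to_gray(pl_db, pl_trunc_db=-147.0, pl_max_db=-47.0):
    """f = max((PL - PL_trnc)/(M1 - PL_trnc), 0), plus the corresponding 8-bit gray level.

    Defaults are the 2D-dataset values (analytic threshold -147 dB, maximum pathloss -47 dB).
    """
    pl = np.asarray(pl_db, dtype=float)
    f = np.clip((pl - pl_trunc_db) / (pl_max_db - pl_trunc_db), 0.0, 1.0)
    return f, np.round(255 * f).astype(np.uint8)

f, gray = pathloss_to_gray([-160.0, -127.0, -47.0])
print(f)      # [0.  0.2 1. ]  -> below truncation, at the PL threshold, at the maximum
print(gray)   # [  0  51 255]
\\end{verbatim}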
We note that there might be other considerations in the link budget calculation, such as a minimum required SNR level, which could be similarly incorporated in the above calculations. Following the above ideas, users of the presented datasets who are interested in settings with more stringent requirements, such as higher SNRs, can truncate the radio maps at higher threshold values.\n\n\n\n\\section{On the (Quasi) Optimality of the ToA Dataset}\\label{sec:AppToAOpt}\n\t\n\t\n\nIn this section, we argue that evaluating ToA-based localization algorithms using \\emph{RadioToASeer} yields upper bounds for their performances in real deployment. Please see \\cite{LocUNetArXiV} for a more detailed discussion.\n\nFirst, we note that the dominant path is the shortest free space path connecting the Tx and the Rx.\n\n\\begin{figure}[!t]\n\t\t\\vspace{-2mm}\n\t\t\\centering\n\t\t\\includegraphics[width=0.75\\linewidth]{testECDFLargerrFontGrid.png}\n\t\n\t\t\n\t\t\\vspace{-2mm}\n\t\t\\caption{\\small Empirical CDF of the difference between the direct and dominant path in meters.}\n\t\t\\label{fig:eCDF}\n\t\\end{figure}\n\n\t\tWe call the straight line connecting Tx and Rx, which may go through obstacles (in our dataset, these are buildings and cars), the \\emph{direct path}.\n\tThe difference (error) between ranging with ToA measurement of the ray (range is calculated as: ToA $\\times$ speed of light) and the direct path is called the \\emph{NLOS (non-line-of-sight) bias}, which is by definition non-negative (zero when the link is in LOS, positive otherwise). The empirical cumulative distribution function (CDF) of the NLOS bias of the dataset is shown in Fig. \\ref{fig:eCDF}. We see that $47\\%$ of the links in the \\emph{RadioToASeer} are in LOS.\n \n\tWe argue that even though the dominant path may be a NLOS path, and hence introduces an NLOS bias, it is quasi-optimal to use the ToA of such paths in ToA ranging-based algorithms. First, if the dominant path is in LOS, then it is by definition the shortest path and the bias is zero. Second, in the NLOS situation, the NLOS bias of a dominant path is lower than that of a potential free space path between the same Tx and Rx, that undergoes reflections to reach its destination. Thus, relying on ToAs of the dominant paths would yield better performances than the ToAs of the other free space paths.\n\t\n\tNotice that in NLOS conditions using the estimated range (length) of the direct path would also incur several problems, due to the penetration of the obstacles. In an urban scenario, the associated direct path of an NLOS link is usually subject to blockage by numerous buildings and empirical evidence shows that loss due to penetration through a building is around $15-20$dB \\cite{585GHzRappaport}. Hence, the received power of an NLOS direct path may go easily below the detection threshold for devices with regular noise figures and SNR requirements. \n\n Furthermore, as shown in \\cite{LocUNetArXiV}, the resolvability of such a direct path is very unlikely. So, it is reasonable to use the dominant paths instead of the direct paths. \n\t\t\n\t Thus, overall, evaluating the ToA ranging based-methods on this dataset yields essentially a best case of what is possible with ToA ranging in an urban environment. 
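As a small numerical illustration of the ranging quantities used in this appendix, the sketch below computes the NLOS bias (ToA-based range minus direct-path distance) and the empirical LOS fraction; the positions and ToA values are synthetic placeholders, not entries of \\emph{RadioToASeer}.
\\begin{verbatim}
import numpy as np

C = 299_792_458.0                             # speed of light in m/s

def nlos_bias(toa_s, tx_xy, rx_xy):
    """NLOS bias in meters: ToA-based range minus the straight-line (direct-path) distance."""
    ranges = np.asarray(toa_s) * C
    direct = np.linalg.norm(np.asarray(tx_xy) - np.asarray(rx_xy), axis=-1)
    return ranges - direct

# Synthetic example: three links, the first one in LOS (zero bias).
tx = np.array([[0.0, 0.0], [0.0, 0.0], [10.0, 5.0]])
rx = np.array([[30.0, 40.0], [100.0, 0.0], [10.0, 55.0]])
toa = np.array([50.0, 130.0, 62.0]) / C       # dominant-path lengths of 50 m, 130 m, 62 m
bias = nlos_bias(toa, tx, rx)
print(bias)                                   # [ 0. 30. 12.]
print(np.mean(bias <= 1e-9))                  # empirical LOS fraction
\\end{verbatim}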
\n\n\\end{appendices}\n\\bibliographystyle{IEEEtran}\n\n\n \n \n"} +{"id": "red-arxiv-10", "source_id": "red-arxiv_10_red-arxiv-10", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\n\\section{QMC simulations and the disorder operator}\n\\label{sec:qmc}\n\n\\subsection{Quantum Monte Carlo implementation}\nIn this work, we use the ALF~\\cite{ALFSciPost_v2} implementation of DQMC at finite-temperature. We used a symmetric Suzuki-Trotter decomposition to control the systematic error in observables and adopted an imaginary time step $\\Delta\\tau t=0.2$. \n We set the parameter $t=1$ and scale the inverse of temperature\n$\\beta$ as $\\beta=L$ in the calculation to access the thermodynamic\nlimit. The coupling strength $\\lambda$ is the parameter we tune in calculation. The disorder operators are defined in parallelogram\nregion in honeycomb lattice as shown in Fig. 1 (c) in the main text.\n\nThe measurement of the disorder operator in DQMC can be implemented as \\cite{Assaad08_rev}\n\\begin{align}\n\\left\\langle X\\left(\\theta\\right)\\right\\rangle & =\\left\\langle \\prod_{\\boldsymbol{i}\\in M}\\exp\\left(i\\hat{Q}_{\\boldsymbol{i}}\\theta\\right)\\right\\rangle =\\frac{1}{Z}\\text{Tr}\\left\\{ e^{-\\beta H}e^{i\\hat{c}^{\\dagger}T\\left(\\theta\\right)\\hat{c}}\\right\\} \\nonumber \\\\\n & =\\sum_{\\left\\{ s\\right\\} }P_{s}\\det\\left(\\boldsymbol{1}+\\Delta\\left(\\theta\\right)\\left(\\boldsymbol{1}-G_{M,s}\\right)\\right)\n\\label{eq:disop_cal_qmc}\n\\end{align}\nfor the fermion bilinear local operator $\\hat{Q}_{\\boldsymbol{i}}$, where $G_{M,s}$ is the block Green's function matrix in subregion $M$ for a given configuration of Hubbard-Stratonovich fields, $s$. Since the Green function matrix is at hand, the disorder operator can be computed on the \nfly without enhancing the computational effort. The matrix elements of $\\Delta\\left(\\theta\\right)=e^{iT\\left(\\theta\\right)}-\\boldsymbol{1}$ dependent on the form of local operator. In our calculation, we consider the local operator to be $\\hat{Q}_{\\boldsymbol{i}}=\\hat{n}_{\\boldsymbol{i}}$ and $\\hat{Q}_{\\boldsymbol{i}}=\\hat{m}_{\\boldsymbol{i}}$. If the Hamiltonian we consider is block diagonal in spin basis, we can write down the expression Eq.(\\ref{eq:disop_cal_qmc}) as the product of the determinant, \n\\begin{equation}\n\\left\\langle X_{c/s}\\left(\\theta\\right)\\right\\rangle =\\sum_{\\left\\{ s\\right\\} }P_{s}\\prod_{\\sigma=\\uparrow\\downarrow}\\det\\left(\\boldsymbol{1}+\\Delta_{\\sigma}^{c/s}\\left(\\theta\\right)\\left(\\boldsymbol{1}-G_{A,\\sigma}\\right)\\right)\n\\end{equation}\nwhere $\\left(\\Delta_{\\sigma}^{c}\\right)_{\\boldsymbol{ii}^{\\prime}\\in M}\\left(\\theta\\right)=\\delta_{\\boldsymbol{ii}^{\\prime}}\\left(e^{i\\theta}-1\\right)$ and $\\left(\\Delta_{\\sigma}^{s}\\right)_{\\boldsymbol{ii}^{\\prime}\\in M}\\left(\\theta\\right)=\\delta_{\\boldsymbol{ii}^{\\prime}}\\left(e^{i\\sigma\\theta}-1\\right)$.\nFor the non-interacting system, $G_{M,s}$ is not dependent on the configuration of Hubbard-Stratonovich fields and the Eq.~(\\ref{eq:disop_cal_qmc}) reduces to Eq.(\\ref{eq:cal_mft_disop}). \n\n\\subsection{Partial particle-hole symmetry}\nIn this part we provide a mapping between the disorder operators $\\left|\\left\\langle X_{c}(\\theta)\\right\\rangle\\right|$ and $\\left|\\left\\langle X_{s}(\\theta)\\right\\rangle\\right|$ under partial particle-hole symmetry. 
We first introduce the definition of the particle-hole symmetry as $P$: $\\hat{c}_{\\boldsymbol{i}\\uparrow}\\rightarrow \\hat{c}_{\\boldsymbol{i}\\uparrow}$, $\\hat{c}_{\\boldsymbol{i}\\downarrow}\\rightarrow (-1)^{\\boldsymbol{i}}\\hat{c}^{\\dagger}_{\\boldsymbol{i}\\downarrow}$. One can easily obtain the follow relation\n\\begin{align}\nP\\hat{m}_{\\boldsymbol{i}}P^{-1} & =P\\left(\\hat{c}_{\\boldsymbol{i}\\uparrow}^{\\dagger}\\hat{c}_{\\boldsymbol{i}\\uparrow}-\\hat{c}_{\\boldsymbol{i}\\downarrow}^{\\dagger}\\hat{c}_{\\boldsymbol{i}\\downarrow}\\right)P^{-1}\\nonumber \\\\\n & =\\hat{c}_{\\boldsymbol{i}\\uparrow}^{\\dagger}\\hat{c}_{\\boldsymbol{i}\\uparrow}-\\hat{c}_{\\boldsymbol{i}\\downarrow}\\hat{c}_{\\boldsymbol{i}\\downarrow}^{\\dagger}=\\hat{n}_{\\boldsymbol{i}}-1\n\\end{align}\nWe further consider a model hamiltonian with partial particle-hole symmetry such that $P\\hat{H}_0P^{-1}=\\hat{H}_0$. Combining with the above equation we have the relation $\\left\\langle X_{s}\\left(\\theta\\right)\\right\\rangle =e^{-iN_{s}\\theta}\\left\\langle X_{c}\\left(\\theta\\right)\\right\\rangle$, where $N_s$ represent the total number of site inside the region M. Since the measurement of the disorder operators are based on the absolute values $\\left|\\left\\langle X_{c/s}(\\theta)\\right\\rangle\\right|$, the additional phase factor generated by transformation $P$ does not change the result. In summary, the disorder operators $\\left|\\left\\langle X_{c}(\\theta)\\right\\rangle\\right|$ and $\\left|\\left\\langle X_{s}(\\theta)\\right\\rangle\\right|$ are the same in the model with partial particle-hole symmetry.\n\n\n\\section{2d DQCP model}\n\\label{sec:2ddqcp}\nFirst, we focus on the model whose Hamiltonian is introduced in Eq.(1) in the main text. To begin, the mean field limit of the Hamiltonian is discussed. Next, we display our DQMC results. We use the RG invariant quantities to determine $\\lambda_{c1}$ and $\\lambda_{c2}$, consistent with the results of previous papers. The unprocessed simulation results on the disorder operator and fitting results for $\\alpha$ for the interacting case are then provided.\n\n\\subsection{Mean field limit}\n\\label{sec:MFT}\n\nBesides the discussion of the QMC implementation of the disorder operator in the interacting model, in this section, we also provide a simple test by using mean field hamiltonians. For the honeycomb lattice we induced the orders in the phase diagram by including mass terms: \n\\begin{align}\nH_{\\text{MF}}= & -t\\sum_{\\left\\langle ij\\right\\rangle \\sigma}\\left(\\hat{c}_{i\\sigma}^{\\dagger}\\hat{c}_{j\\sigma}+h.c.\\right)+m\\sum_{i\\sigma}(-1)^{\\boldsymbol{i}}\\hat{c}_{i\\sigma}^{\\dagger}\\hat{c}_{i\\sigma}-\\nonumber \\\\\n & \\lambda\\sum_{\\hexagon}\\boldsymbol{N}\\cdot\\left(\\sum_{\\left\\langle \\left\\langle ij\\right\\rangle \\right\\rangle \\in\\hexagon}i\\nu_{ij}\\hat{c}_{i}^{\\dagger}\\boldsymbol{\\sigma}\\hat{c}_{j}+h.c.\\right).\n\\label{eq:ham_mft}\n\\end{align}\nHere, $m$ is the staggered mass in real space that generates the charge density wave (CDW) long range order. The mass term of amplitude $\\lambda$ is given by the vector product of the O(3) vector $\\boldsymbol{N}$ and the generalized spin-orbit coupling term, which produces the long-range QSH order. The Hamiltonian of Eq.~\\eqref{eq:ham_mft} only contains fermion bilinear terms and can be solved exactly. 
For a given fermion bilinear Hamiltonian $H_{\\text{bilinear}}=\\hat{c}^{\\dagger}K\\hat{c}$, the disorder operator can be calculate using the following simple formula\n\\begin{align}\n\\left\\langle X_{\\alpha}(\\theta)\\right\\rangle & =\\frac{1}{Z}\\text{Tr}\\left\\{ e^{-\\beta H_{\\text{bilinear}}}X_{\\alpha}(\\theta)\\right\\} \\nonumber \\\\\n & =\\frac{1}{Z}\\text{Tr}\\left\\{ e^{-\\beta\\hat{c}^{\\dagger}K\\hat{c}}e^{i\\hat{c}^{\\dagger}T(\\theta)\\hat{c}}\\right\\}\\nonumber \\\\\n & =\\det\\left(G+e^{iT(\\theta)}\\left(\\boldsymbol{1}-G\\right)\\right)\n \\label{eq:cal_mft_disop}\n\\end{align}\nwhere the matrix $T(\\theta)$ is a diagonal matrix with nonzero diagonal element when the matrix index belong to the region $M$ and $G=\\left(\\boldsymbol{1}+e^{-\\beta K}\\right)^{-1}$ is the Green's function. This allows for an efficient calculation of the determinant in Eq. (\\ref{eq:cal_mft_disop}). In the following, we focus on the disorder operator $X_{c/s}(\\theta)$ as defined in the main text and discuss their behavior for different mean field Hamiltonians. \n\n\\begin{figure}[tb]\n\t\\begin{centering}\n\t\\includegraphics[width=0.48\\textwidth]{mft_dos}\n\t\\par\\end{centering}\n\t\\caption{Local density of state(LDOS) of the model Eq.~(\\ref{eq:ham_mft}) with different model parameters. (a) is the free fermion LDOS for the honeycomb lattice with system size $L=90$. In (b) we consider finite the charge density wave mass by setting $m=1$ and $\\lambda=0$. In (c) we consider the finite QSH type mass by setting $\\lambda=0.3$ and $m=0$.}\n\t\\label{fig:mft_dos}\n\t\\end{figure}\n\t\nFig.~\\ref{fig:mft_dos} depicts the local density of states (LDOS) of the model in Eq.~\\eqref{eq:ham_mft} at various mean-field parameters. By setting $t=1$, $m=0$ and $\\lambda=0$, Eq.~\\eqref{eq:ham_mft} describe a stable Dirac semi-metal ground state in the thermodynamic limit, as presented in Fig.~\\ref{fig:mft_dos} (a). Without the mass term, the Hamiltonian is block diagonal in the spin basis so that the disorder operator in the charge and spin channels are identical. We calculate the disorder operator with the parallelogram region as in the Fig.1 (c) in the main text. In Fig.~\\ref{fig:mft_DSM_disop} (a), for a given rotation angle $\\theta$, the disorder operator $X(\\theta)$ is dominated by the perimeter law decay, $\\text{ln}\\left|X_{c/s}\\left(\\theta\\right)\\right|\\sim -al+s(\\theta)\\ln l+c$. As we discussed above, the free Dirac system is a typical conformal fixed point and the disorder operator has sub-leading logarithmic corrections~\\cite{lliesiuBootstrapping2018,helmesUniversal2016}. We fit the data, Fig.~\\ref{fig:fit_window_mft1}, to extract the \n logarithmic coefficient $s(\\theta)$ as a function of angle, Fig.~\\ref{fig:mft_DSM_disop} (b). Apparently, at small angle, $s(\\theta)\\sim\\alpha\\theta^2$. At large system size, the quadratic coefficient $\\alpha$ converges to a stable value $\\alpha=0.067(5)$. This is comparable to $\\alpha=0.0658$ obtained in the thermodynamic limit. It is equally consistent with the other free Dirac fermion computation for the $\\pi$-flux model in Sec.~\\ref{sec:secVI}. Specifically, we obtain the analytic density correlation function combining the corner distribution of the region $M$, where the detailed analysis is presented in Sec.~\\ref{sec:secVII}. The slight differences in our results stem from finite size effect, which can be eliminated by extrapolation. 
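The determinant formula of Eq.~(\\ref{eq:cal_mft_disop}) translates directly into a few lines of code. The sketch below (our illustration for a generic bilinear matrix $K$ and an arbitrary index set for the region $M$, not the production code behind the figures) evaluates $\\left|\\left\\langle X(\\theta)\\right\\rangle \\right|$ in the charge channel.
\\begin{verbatim}
import numpy as np

def disorder_operator(K, region_M, theta, beta):
    """|<X(theta)>| = |det(G + e^{iT(theta)} (1 - G))| for a bilinear Hamiltonian c^dag K c."""
    N = K.shape[0]
    # Green's function G = (1 + e^{-beta K})^{-1}, built in the eigenbasis of K.
    eps, U = np.linalg.eigh(K)
    G = (U * (1.0 / (1.0 + np.exp(-beta * eps)))) @ U.conj().T
    # e^{iT(theta)} is diagonal: phase e^{i theta} on sites in M, 1 elsewhere.
    phases = np.ones(N, dtype=complex)
    phases[list(region_M)] = np.exp(1j * theta)
    expT = np.diag(phases)
    return abs(np.linalg.det(G + expT @ (np.eye(N) - G)))

# Toy example: a 12-site tight-binding chain with a contiguous region M.
N = 12
K = -(np.eye(N, k=1) + np.eye(N, k=-1))
print(disorder_operator(K, range(4, 8), theta=np.pi / 2, beta=10.0))
\\end{verbatim}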
\n\t\n\\begin{figure}[tb]\n\t\\begin{centering}\n\t\\includegraphics[width=0.48\\textwidth]{log_cof_mft}\n\t\\par\\end{centering}\n\t\\caption{(a)Logarithmic correction term $s_c(\\theta)$ extract from the disorder operator $X_c(\\theta)$ by using the numerical fitting as function of rotation angle $\\theta$. (b)The quadratic coefficient $\\alpha(L)$ as function of $1/L$ from free fermion model.}\n\t\\label{fig:mft_DSM_disop}\n\\end{figure}\n\t\n\\begin{figure}[tb]\n\\begin{centering}\n\\includegraphics[width=0.48\\textwidth]{mft_x_with_mass}\n\\par\\end{centering}\n\\caption{Disorder operator $X_c(\\theta)$ of model Eq.(\\ref{eq:ham_mft}) defined in charge channel as function of perimeter $l$. We set the y axis in logarithmic scale. In (a) we consider free fermion model in honeycomb lattice by setting $m=\\lambda=0$. In (b) we consider the model with finite CDW mass $m=1.0$. In (c) we set $\\lambda=0.3$ to observe the effect of QSH type mass term. We choice system size $L=90$ at (a)(b) and $L=36$ at (c).}\n\\label{fig:mft_mass_disop}\n\\end{figure}\n\t\nIn Fig.~\\ref{fig:mft_mass_disop}, we present the disorder operator results for massive Dirac system. In Fig.~\\ref{fig:mft_mass_disop} (b), we set $t=1$, $m=1.0$ and $\\lambda=0$, which turns on the staggered CDW mass term that breaks sublattice symmetry. The CDW mass term is diagonal in the spin basis, such that the disorder operators are channel independent. The disorder operator in the massive fermion exhibit a much slower decay than the gapless Dirac fermions, but is still dominated by a perimeter law decay. In Fig.~\\ref{fig:mft_mass_disop}(c), we set the parameter $t=1$, $m=0$, $|\\lambda\\boldsymbol{N}|=1$ and $N_x=N_y=N_z$ to turn on the QSH mass term. The QSH mass term breaks the SU(2) symmetry of free Dirac fermion which removes the degeneracy of two channels in the disorder operator. Due to the fact that the SU(2) symmetry break into U(1) symmetry correspond to the rotation around axis $\\boldsymbol{N}$ in the mean field Hamiltonian, \nthe spin disorder operator $X_s(\\theta)$ is not well defined when $\\boldsymbol{N}/|\\boldsymbol{N}|\\ne \\boldsymbol{e}_z$.\n However, the global charge conservation is present for the QSH mass such that $X_c(\\theta)$ is still well defined.\n In Fig.~\\ref{fig:mft_mass_disop}(c), the decay rate of $X_c(\\theta)$ is dominated by the perimeter law. As apparent from \n Fig.~\\ref{fig:fit_window_mft1}(c) logarithmic corrections to the area law are absent in the massive CDW phase. \n \n\\begin{figure}[tb]\n\\begin{centering}\n\\includegraphics[width=0.48\\textwidth]{fit_cof_free}\n\\par\\end{centering}\n\\caption{\nThe fitting range is set by $\\left[ l_{min}, l_{max} \\right] $ and the data is fitted to the form \n$ \\ln \\left|X(\\theta)\\right| = -al+s(\\theta)\\ln l+c. $\nLogarithmic coefficient $s(\\theta)$(a) (c) and linear coefficient $a$ (b) (d) as function of the smallest perimeter $l_{min}$ considered in the fitting. \nWe set $L=90$ and $\\theta=1.0$. (a) and (b) are obtained from the free fermion model and (c) and (d) are obtained from model with CDW mass term. 
The brown dot lines in (b) and (d) indicate the linear coefficient $a$ extract from the fitting of linear function.}\n\\label{fig:fit_window_mft1}\n\\end{figure}\n\n\\subsection{RG invariant quantities}\nAs a benchmark to previous calculations~\\cite{liu2019superconductivity,liuGross2021}, we present the RG invariant correlation ratio, \n$R_{c}^{\\text{QSH/SC}}$ as a function of coupling constant $\\lambda$ in Fig.\\ref{fig:Rc_qsh}. It is defined as \n\\begin{equation}\nR_{c}^{\\text{QSH/SC}}=1-\\frac{S_{\\text{QSH/U(1)}}\\left(\\boldsymbol{k}=\\Gamma+d\\boldsymbol{k},\\tau=0\\right)}{S_{\\text{QSH/U(1)}}\\left(\\boldsymbol{k}=\\Gamma,\\tau=0\\right)}\n\\end{equation}\nwhere $d\\boldsymbol{k}=(0,\\frac{4\\pi}{\\sqrt{3}L})$ and $S_{\\text{QSH/U(1)}}=\\frac{1}{L^2}\\sum_{\\boldsymbol{r}\\boldsymbol{r^{\\prime}}}e^{i\\boldsymbol{q}\\cdot(\\boldsymbol{r}-\\boldsymbol{r}^{\\prime})}\\mathrm{Tr}\\{\\hat{O}_{\\boldsymbol{r}}\\hat{O}_{\\boldsymbol{r}^{\\prime}}\\}$ is the structure factor defined by the order parameter $\\hat{O}_{\\boldsymbol{r}}$. The QSH local vector order parameter takes the form of a spin current $\\hat{O}^{\\text{QSH}}_{\\boldsymbol{r},\\boldsymbol{\\delta}}=i\\hat{\\boldsymbol{c}}^{\\dagger}_{\\boldsymbol{r}}\\boldsymbol{\\sigma}\\hat{\\boldsymbol{c}}_{\\boldsymbol{r}+\\boldsymbol{\\delta}}+\\text{H.c.}$ where $\\boldsymbol{\\delta}$ runs over all the next-nearest neighbours of the hexagon labeled by $\\ve{r}$. We use the SC local order parameter $\\hat{O}_{\\boldsymbol{r},\\tilde{\\boldsymbol{\\delta}}}^{\\text{SC}}=\\frac{1}{2}\\left(\\hat{c}_{\\boldsymbol{r}+\\tilde{\\boldsymbol{\\delta}},\\uparrow}^{\\dagger}\\hat{c}_{\\boldsymbol{r}+\\tilde{\\boldsymbol{\\delta}},\\downarrow}^{\\dagger}+\\text{H.c.}\\right)$ where $\\tilde{\\boldsymbol{\\delta}}$ runs over the sublattice a,b in the honeycomb lattice.\n\n\n\\label{sec:RG}\n\\begin{figure}[htb]\n\\begin{centering}\n\\includegraphics[width=0.48\\textwidth]{Ratio_GNQCP_DQCP}\n\\par\\end{centering}\n\\caption{RG invariant quantities $R_{c}^{\\text{QSH/SC}}$ as function of $\\lambda$ for the model of Eq.~(1) in the main text. Due to the expected \nLorentz symmetry of the critical points, we scale the \ninverse temperature with system size: $\\beta=1/T=L$.}\n\\label{fig:Rc_qsh}\n\\end{figure}\n\nThe results for $R_{c}^{\\text{QSH}}$ in the vicinity of the GN-QCP are presented in Fig.~\\ref{fig:Rc_qsh}(a). The crossing point of $R_{c}^{\\text{QSH}}$ is stable and yields $\\lambda_{c1} = 0.0187(2)$. In Fig.~\\ref{fig:Rc_qsh}(b), we observe that $R_{c}^{\\text{SC}}$ vanishes upon increasing \nsystem size in the vicinity of the GN-QCP. This reflects the absence of the long-range s-wave pairing at this critical point. In Figs.~\\ref{fig:Rc_qsh}(c) and (d), our QMC data suggest that the trends of the crossing points of the two RG invariant quantities $R_{c}^{\\text{QSH}}$ and $R_{c}^{\\text{SC}}$ \nconverge to the same value as a function of system size. Again, our data is consistent with the estimate $\\lambda_{c2} = 0.0332(2)$.\n\n\\subsection{Raw data of disorder operator in the interacting case}\n\\label{sec:Raw data}\nIn this subsection we show the behavior of the raw data for the disorder operators $X_{c/s}(\\theta)$ for various coupling constants $\\lambda$ and rotation angle $\\theta$. In Fig.~\\ref{fig:x-vs-theta}, the disorder operator $X_{c/s}(\\theta)$ at fixed angle $\\theta=\\pi/4,\\pi/2,3\\pi/4,\\pi$ illustrate the evolution as function of coupling $\\lambda$. 
Up to $L=15$, the decay rate of $X_{c/s}(\\theta)$ is dominated by the area law $\\sim \\exp(-aL)$. As we increase the coupling, we observe a clear slowing down of the decay rate.\n\n\\begin{figure}[tb]\n\\begin{centering}\n\\includegraphics[width=0.46\\textwidth]{x-vs-theta}\n\\par\\end{centering}\n\\caption{Disorder operator $X_{c}(\\theta)$(a)-(e) and $X_s(\\theta)$(f)-(j) obtained from the model of Eq.~(1) in the main text as function of \nperimeter $l$ on the honeycomb lattice. We consider a logarithmic y-axis and $L=\\beta=15$. Different subplots correspond to different \nrotation angles $\\theta$. In each subplot, we present the disorder operator obtained for different $\\lambda$ for comparison. }\n\\label{fig:x-vs-theta}\n\\end{figure}\n\n\\begin{figure}[htbp]\n\t\\begin{centering}\n\t\\includegraphics[width=0.46\\textwidth]{x-vs-lam}\n\t\\par\\end{centering}\n\t\\caption{Disorder operator $X_{c}(\\theta)$(a)-(e) and $X_s(\\theta)$(f)-(j) obtained for the model of Eq.~(1) in the main text as function of perimeter $l$ on the honeycomb lattice. We consider a logarithmic y-axis and $L=\\beta=15$. Different subplots correspond to different coupling constants $\\lambda$. In each subplot, we present the disorder operator obtained from different rotation angle $\\theta$ for comparison.}\n\t\\label{fig:x-vs-lam}\n\\end{figure}\n\n\n\\begin{figure}[tb]\n\\begin{centering}\n\\includegraphics[width=0.48\\textwidth]{fit_cof_qmc}\n\\par\\end{centering}\n\\caption{Logarithmic coefficient $s(\\theta)$ as function of smallest perimeter $l_{min}$ consider in fitting. We use the QMC results of $L=\\beta=18$ as the input in numerical fitting.}\n\\label{fig:fit_window_qmc}\n\\end{figure}\n\n\\begin{figure}[htbp]\n\t\\begin{centering}\n\t\\includegraphics[width=0.46\\textwidth]{powlaw_fit_qmc}\n\t\\par\\end{centering}\n\t\\caption{Coefficient $\\alpha_{c}(L)$ (a)-(c) and $\\alpha_{s}(L)$ (d)-(f) as function of $1/L$ in the QMC simulation. Different subplot correspond to different coupling constants $\\lambda$. In each subplot we use the power law function $f(x)$ to extract the coefficient $\\alpha_{c/s}(L=\\infty)$ in thermodynamic limit.}\n\t\\label{fig:fig4}\n\\end{figure}\n\n\nWe now focus on the angle dependence for a given coupling. In Fig.~\\ref{fig:x-vs-lam}, we set the coupling constant to be deep in the three phases and also the vicinity of the two critical points and monitor the trends of $X_{c/s}(\\theta)$ as function of $\\theta$. On the whole, the decay rate of $X_{c/s}(\\theta)$ are also dominated by the area law and increase as the angle grows. \n\n\\begin{table}[tbp]\n \\caption{Results of coefficient $\\alpha(L)$ in thermal dynamic limit by using polynomial fitting in Fig.~\\ref{fig:fig4}. }\n \\centering\n \n \\def1.5{1.5}\n \n \\begin{tabular*}{\\linewidth}{@{\\extracolsep{\\fill} } l l c c c c} \n \n \\hline\\hline\n \\multicolumn{4}{c}{\\hspace*{-3em}$\\alpha(L=\\infty)$ as function of $\\lambda$ \\hspace*{-3em}} \\\\\n \\hline\n Channel & $\\lambda=0.006$ & $\\lambda_{c1}=0.01875$ & $\\lambda_{c2}=0.03315$ \\\\[0.5ex]\n \\hline\\hline\n $\\alpha_c(\\infty)$ & 0.068(24) & 0.044(12) & 0.049(17) \\\\ \n\t\\hline\n\t$\\alpha_s(\\infty)$ & 0.068(31) & 0.14(1) & 0.12(4) \\\\\n\t\\hline\\hline\n \\end{tabular*}\n \\label{table:fit_alpha}\n\\end{table}\n\nIn Fig.~\\ref{fig:fit_window_qmc} we present the analysis of the data carried out so as to extract the logarithmic correction to the area law. 
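As an illustration of the two fitting steps used here (extracting $s(\\theta)$ from $\\ln\\left|X(\\theta)\\right|$ inside a window $[l_{min},l_{max}]$, and then the quadratic coefficient $\\alpha$ from $s(\\theta)$ at small angles), a minimal sketch with synthetic input is given below; the helper names are ours.
\\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

def fit_s(l, lnX, l_min, l_max):
    """Fit ln|X(theta)| = -a*l + s*ln(l) + c inside the window [l_min, l_max]."""
    mask = (l >= l_min) & (l <= l_max)
    model = lambda x, a, s, c: -a * x + s * np.log(x) + c
    (a, s, c), _ = curve_fit(model, l[mask], lnX[mask])
    return a, s, c

def fit_alpha(thetas, s_values):
    """Fit s(theta) = alpha * theta**2 at small angles."""
    (alpha,), _ = curve_fit(lambda th, al: al * th**2, thetas, s_values)
    return alpha

# Synthetic, noise-free data with a = 0.30, s = 0.07, c = 0.10 (placeholders):
l = np.arange(8.0, 41.0, 2.0)
lnX = -0.30 * l + 0.07 * np.log(l) + 0.10
print(fit_s(l, lnX, l_min=10, l_max=40))      # recovers (0.30, 0.07, 0.10)
\\end{verbatim}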
As for the mean-field case, we define a fit window and vary its bounds so as to obtain reliable results. Clearly, this is a delicate quantity to extract, but we have found that we obtain consistent results for the fitting range $l_{min} \\simeq 10$ and $l_{max} =40$ for our largest $L=18$ system. For the results presented in the main text, we choose $l_{max}=2L+4$ to be the maximum perimeter under periodic boundary conditions and $l_{min}=10$ for $L=18$ at the DQCP and $l_{min}=8$ for the other parameters.\n\n\nAs supplementary data, we provide the details of the finite-size extrapolation analysis of the quadratic coefficients $\\alpha_{c/s}(L)$ at different couplings $\\lambda$. We assume that the finite-size coefficient obeys the power-law dependence $\\alpha_{c/s}(L)\\sim \\alpha(\\infty)+\\kappa L^{-e}$ and use this form in the numerical fitting to obtain the coefficient $\\alpha(\\infty)$ in the thermodynamic limit. We consider system sizes $L=6,9,12,15,18$ with $\\beta=L$ scaling in anticipation of $z=1$. These results are shown in Fig.~\\ref{fig:fig4} and Table~\\ref{table:fit_alpha}.\n\n\n\n\n\\section{$\\pi$-flux model}\n\\label{sec:secVI}\nIn this section, we begin with an introduction of the $\\pi$-flux model. We then discuss the free case of the $\\pi$-flux model, including the exact expression for the density correlation function and the disorder operator, and compare with the 2d DQCP model, which is defined on the honeycomb lattice. To generalize to the interacting case, we investigate $\\alpha$ for the free case and at the GN-Ising QCP, so as to obtain an estimate of the current central charge $C_J$ for this QCP. \n\n\\subsection{Introduction of the $\\pi$-flux model}\n\nTo probe a Gross-Neveu Ising transition, from DSM to QSH, we adopt another fermionic model, $H = H_{\\text{f}} + H_{\\text{Ising}} + H_{\\text{int}}$, defined on the $\\pi$-flux lattice. The interaction $H_{\\text{int}}$ describes the coupling of the free Dirac fermions to a transverse-field Ising model, $H_{\\text{Ising}}$. Specifically, \n\\begin{equation}\n\t\\begin{aligned}\n\t\tH_{\\text{f}} &= -t\\sum_{\\left \\langle ij \\right \\rangle \\sigma} (e^{i \\sigma \\phi}\\hat c_{i \\sigma }^\\dagger \\hat c_{j \\sigma } + e^{-i\\sigma\\phi} \\hat c_{j \\sigma}^\\dagger \\hat c_{i \\sigma} ) \\\\\n\t\tH_{\\text{Ising}} &= -J\\sum_{\\left \\langle pq \\right \\rangle } \\hat s_p^z \\hat s_q^z-h\\sum_{p} \\hat s_p^x \\\\\n\t\tH_{\\text{int}} &= \\xi \\sum_{\\langle\\langle ij \\rangle\\rangle \\sigma } \\hat s_p^z (\\hat c_{i\\sigma}^\\dagger \\hat c_{j\\sigma} + \\hat c_{j\\sigma}^\\dagger \\hat c_{i\\sigma})\n\t\\end{aligned}\n\t\\label{eq:Ham_Int}\n\\end{equation}\n$H_{\\text{f}}$ describes the nearest-neighbor hopping for fermions, $t=1$, on the $\\pi$-flux lattice, with $\\phi=\\frac{\\pi}{4}$, and we require spin-up and spin-down fermions to carry opposite flux patterns in order to preserve the time-reversal symmetry of the full Hamiltonian. $H_{\\text{Ising}}$ describes a ferromagnetic $J=1$ transverse-field Ising model. $H_{\\text{int}}$ couples the Ising spins to the next-nearest-neighbor fermion hoppings. The coupling constant $\\xi = \\pm 1$ has a staggered sign structure alternating between neighboring plaquettes, i.e., $+ (-)$ for solid (dashed) bonds as illustrated in Fig. 1(e) in the main text. Upon tuning the transverse field $h$, the model undergoes a GN-Ising QCP at $h_c=4.11$ between a DSM phase (at $h< h_c$) and a QSH state (at $h>h_c$). Figs. 
1(b), (e) are the corresponding phase diagram and the choice of the entanglement region $M$. \n\n\\subsection{Density correlation function in the free case}\n\nHere we investigate another lattice regularization of Dirac fermions, namely the $\\pi$-flux model. The Hamiltonian reads, \n\\begin{equation}\n\t\\begin{aligned}\n\t\tH_{\\text{f}} = &-t_1\\sum_{\\left \\langle ij \\right \\rangle} (e^{i \\phi}\\hat c_{i }^\\dagger \\hat c_{j } + e^{-i\\phi} \\hat c_{j}^\\dagger \\hat c_{i} ) \\\\\n\t\t&-t_2\\sum_{\\left \\langle\\langle ij \\right \\rangle\\rangle} (e^{i \\phi}\\hat c_{i }^\\dagger \\hat c_{j } + e^{-i\\phi} \\hat c_{j}^\\dagger \\hat c_{i} )\n\t\\end{aligned}\n\t\\label{eq:Ham}\n\\end{equation}\nThe sketch of the $\\pi$-flux model is shown in Fig.~1(e) in the main text. The fermions are located on the lattice sites, colored in green and blue, indicating two sublattices. $\\hat c^{\\dagger}, \\hat c$ are fermion creation and annihilation operators. The fermion hopping term between nearest two green and blue sites has an extra phase factor $e^{i\\phi}$, whose sign is positive (negative) along the direction of the arrow. The choice $\\phi=\\frac{\\pi}{4}$ produces a phase $\\pi$ in each plaquette. $t_1$ represents nearest hopping for different sublattices, while $t_2$ represents the next-nearest hopping along diagonal lines.\nThe Hamiltonian gives rise to two Dirac cones located at $(0,\\pi)$ and $(\\pi,0)$~\\cite{he2018Dynamical,liuDesigner2020}. As mentioned, we set $t_1=1, t_2=0$. \nThe low energy physics of the $\\pi$-flux model is equivalent to that of the honeycomb lattice and, in the absence of spin degree of freedom, \n is described by two two component Dirac spinors~\\cite{Ihrig2018Critical}. Furthermore, $N_f$ is directly related to the current central charge in CFT. For single free Dirac fermion, $C_{J,free}=2$. In the language of lattice model, e.g. both $\\pi$-flux model, and honeycomb lattice, there are two Dirac points in the Brillouin zone, such that $N_f=2C_{J,free}$. \nNote that the Hamiltonian in Eq.~\\eqref{eq:Ham} involves spinless fermions. When one considers the limit $\\xi=0$ the two spin flavor decouple, corresponding to $N_f=8$.\n\nTo verify $\\alpha=\\frac{N_f}{(4\\pi)^2}$ at small angle for Dirac cones~\\cite{lliesiuBootstrapping2018}, we calculate the density correlation function in $\\pi$-flux model,\n\\begin{equation}\n\tD_{\\pi}(\\mathbf{r})=\\langle \\hat n_{\\mathbf{r}_i} \\hat n_{\\mathbf{r}_j} \\rangle - \\langle \\hat n_{\\mathbf{r}_i} \\rangle \\langle \\hat n_{\\mathbf{r}_j} \\rangle = \\frac{N_{F}}{(4 \\pi)^{2}r^{4}} \\frac{1-2 \\frac{x y}{r^{2}}(-1)^{x+y}}{2}\n\t\\label{eq:den_corr}\n\\end{equation}\nwhere, $\\mathbf{r} = \\mathbf{r}_i - \\mathbf{r}_j$, $r$ is the absolute distance of $\\mathbf{r}$, and $x(y)$ is the component of $\\mathbf{r}$ along the x(y) direction. Here corrections to the rotationally invariant IR result are taken into account. \nTo exhibit the above expression in the lattice model, we provide analytic results of the density correlation function in Fig.~\\ref{fig:den_corr}. In lattice models, such as the $\\pi$-flux model~\\cite{he2018Dynamical,liuDesigner2020}, the density correlation function has small variance with respect to Eq.~\\eqref{eq:den_corr}. The red line indicates the \n$\\frac{N_F}{(4\\pi)^2 r^4}$ form where $N_F=4$ for the $\\pi$-flux model. In Fig.~\\ref{fig:den_corr} (a), we plot $D(\\mathbf{r})$ for each site at $L=1000, t_1=1, t_2=0$ and compare with the analytical formula, yellow dots. 
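For reference, the analytic form of Eq.~\\eqref{eq:den_corr} used in this comparison can be tabulated directly; a minimal sketch (assuming $N_F=4$ and integer $x,y$ in units of the lattice constant, purely for illustration) reads:\n\\begin{verbatim}\nimport numpy as np\n\n# Sketch: evaluate the analytic density correlation D_pi(r) of Eq. (den_corr)\n# for N_F = 4 at a few integer separations (x, y).\ndef D_pi(x, y, NF=4):\n    r2 = x**2 + y**2\n    return NF / ((4*np.pi)**2 * r2**2) * (1 - 2*x*y/r2 * (-1)**(x + y)) / 2\n\nfor x, y in [(1, 0), (1, 1), (2, 1), (3, 3)]:\n    print((x, y), D_pi(x, y))\n\\end{verbatim}\n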
\nDifferences are apparent at small $r$, corresponding to large-momentum, or short-wavelength, contributions. The short-wavelength properties are determined by the lattice structure or the microscopic interaction coefficients. Next, in Fig.~\\ref{fig:den_corr} (b), we change $t_2$ to $0.1$, for which the two Dirac cones still remain, to verify this difference. We find that the difference between the yellow and blue dots is confined to small $r$. To further explore the lattice model implementation, we plot various system sizes in Fig.~\\ref{fig:den_corr} (c) and find that when $r$ is comparable with the system size, $D(\\mathbf{r})$ clearly deviates from the $\\frac{1}{r^4}$ behavior, which we regard as a finite-size effect. \n\n Fig.~\\ref{fig:den_corr} (d) extracts the lattice sites on the diagonal line, i.e., $x=y$, where the oscillation term in Eq.~\\eqref{eq:den_corr} gives 1. \n Here we note that the lattice constant is set to unity such that $x$ and $y$ can be half-integers or integers. \n\n The results nicely match the red line at moderate $r$ for each system size. Thus we conclude that the density correlation function calculated in the lattice model differs from the continuum limit both at small $r$ and at $r \\sim L$, due to lattice microscopic details and finite system sizes, respectively. \n\n\\begin{figure}[htp!]\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{den_corr_compare_low.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{den_corr_compare_t1_low.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{den_corr_L_low.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{den_corr_L_diag.pdf}\n\t\\end{minipage}\n\t\\caption{Analytical density correlation function in Eq.~\\eqref{eq:den_corr} versus $r$ compared with lattice model results on a ln-ln scale. The red line in each subplot is the same functional form $\\frac{N_F}{(4\\pi)^2 r^4}$, where $N_F=4$ for the spinless $\\pi$-flux model. The points with the same color and $r$ represent various values of $x,y$. (a) $D(r)$ for $L=1000, t_1=1, t_2=0$ and dots for Eq.~\\eqref{eq:den_corr}, plotted in blue and yellow. (b) $D(r)$ for $L=1000, t_1=1, t_2=0.0$ and $L=1000, t_1=1, t_2=0.1$, plotted in blue and yellow. (c) $D(r)$ for $t_1=1, t_2=0.0$ at various system sizes $L=400,200$ and $20$ plotted in blue, yellow and green. (d) $D(r)$ for $t_1=1, t_2=0.0$ on the diagonal line, i.e. $x=y$, with various system sizes $L=2000, 1000$ and $20$ plotted in blue, yellow and green.}\n\t\\label{fig:den_corr}\n\\end{figure}\n\n\\subsection{The disorder operator in the free case} \nUtilizing Eq.~\\eqref{eq:den_corr}, we calculate the disorder operator at small $\\theta$ in the free case,\n\\begin{equation}\n\t\\frac{\\ln(|X(\\theta)|)}{\\theta^2} = -a_1 l + \\frac{N_f}{(4 \\pi)^2} \\ln l + a_0\n\t\\label{eq:dis_ope}\n\\end{equation} \nThe coefficient of the area law, $a_1$, is determined by the lattice parameters, e.g., $t_1, t_2$. The logarithmic correction is universal,\n and only depends on $N_f$. 
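For the spinless $\\pi$-flux model with $N_f=4$, this universal coefficient evaluates to\n\\begin{equation*}\n\\frac{N_f}{(4\\pi)^2}=\\frac{4}{16\\pi^2}\\simeq 0.02533,\n\\end{equation*}\nwhich is the reference value quoted in the fits below.\n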
In the following, we will focus on the subleading logarithmic correction.\n\n\\begin{figure}[htp!]\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_L2000.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_rob_L2000.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_L20.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_rob_L20.pdf}\n\t\\end{minipage}\n\t\\caption{ The scaling behavior of the disorder operator on the $\\pi$-flux lattice at small angle $\\theta=0.1$, and the choice of fitting range for determining the coefficient $s$, illustrating the finite-size effect. (a,b) for $L = 2000$, (c,d) for $L=20$, comparable with the size of the interacting system. (a,c) plot the original disorder operator data versus perimeter $l$, where $-\\ln X$ appears to be a linear function. (b,d) show $s(\\theta)/\\theta^2$ for various fit ranges $l \\in \\left[ f_{low}, f_{up}\\right]$. The analytic value in the thermodynamic limit, $\\frac{N_f}{(4\\pi)^2}$, is plotted as a black solid line. We find that if the lower boundary $f_{low}$ is too small, of the same order as $1$, e.g. the blue line in (b), the fit results overestimate the value. In contrast, when the upper boundary $f_{up}$ gets larger, the fit results gradually deviate from the analytic value. Thus we conclude that the proper choice to obtain $s(\\theta)/\\theta^2 = \\frac{N_f}{(4\\pi)^2}$ is $1 \\ll f_{low} < f_{up} \\ll L$. For the small size $L=20$ in (d), we find that this condition is hard to satisfy, which leads to an overestimated value for $s$.}\n\t\\label{fig:diso_ope}\n\\end{figure}\n\nTaking small $\\theta$ as an example, Eq.~\\eqref{eq:dis_ope} is strictly valid only in the limit $L, l \\rightarrow \\infty$, since one needs to compute the integral over Eq.~\\eqref{eq:den_corr} in the region $M$, i.e. $\\int_{\\mathbf{r}_1 \\in M} d^2 \\mathbf{r}_1 \\int_{\\mathbf{r}_2 \\in M} d^2 \\mathbf{r}_2 D_{\\pi}(\\mathbf{r}_1 - \\mathbf{r}_2)$. Technically, in Monte Carlo simulations of the interacting fermionic model, $l$ and $L$ are both finite. Since $\\ln l$ is small compared to the leading $l$ term, at finite system size it is hard to obtain $\\alpha$ in the thermodynamic limit by a direct fit to the functional form of Eq.~(1) in the main text at the current system sizes, i.e. $L=18$ for the interacting system. The disorder operator in the Bose-Hubbard model also possesses a sub-leading $\\ln l$ correction~\\cite{wangScaling2021}. However, we numerically find that the coefficient $-a_1$ of the leading area-law term for free Dirac fermions is much larger than that in the Bose-Hubbard model, \nas shown in Fig.~\\ref{fig:diso_ope}(a) for $L=2000$ and (c) for $L=20$,\nwhich makes it difficult to extract $\\alpha$. \nBesides, we notice that the choice of the fitting range changes the result, see Fig.~\\ref{fig:diso_ope}. We denote the fit range as $l \\in \\left[ f_{low}, f_{up}\\right]$. We plot several choices of $f_{low}$ as curves with various colors, the x-axis being $f_{up}$. From the above analysis we conclude that the coefficient suffers from severe finite-size effects. \n\nTo obtain $\\alpha$ in the thermodynamic limit, we adopt two strategies. 
In the main text, pertaining to interacting systems, we directly fit the log correction for each system size and then extrapolate to $L \\rightarrow \\infty$. Both the error bars stemming from the Monte Carlo data and the \nsystematic error generated by varying the choice of the fitting range are taken into account for the estimate of $\\alpha(L)$. In contrast to the interacting case, for free systems we are able to reach lattices large enough to approach the thermodynamic limit. By changing the fitting range, we show that one can obtain approximate values of $\\alpha$ in the large system size limit. In the following, we show how to obtain the value corresponding to $N_f=4$.\n\nTo carry out the analysis, we introduce the concept of a so-called optimal fit range. At finite size, we observe that all choices of the fitting \n range overestimate the log correction as compared to the value in the thermodynamic limit. Therefore, for a given system size, we \n consider the fitting range which gives the smallest log correction. We find that this smallest log correction also gradually approaches the thermodynamic limit value as $L$ increases. \n\nIn Fig.~\\ref{fig:diso_ope} (b), at a large system size, we observe that if $1 \\ll f_{low} < f_{up} \\ll L$ is satisfied, for example $f_{low} = 100, f_{up} = 200$, the fit result $\\frac{s}{\\theta^2}=0.02536$ in (a) is close to the analytic value in the continuum limit, $\\frac{N_f}{(4\\pi)^2}=0.02533$. For comparison, at $L=20$ in (c,d), we show that whatever the choice of fit range, the result is larger than $\\frac{N_f}{(4\\pi)^2}$. Even the optimal fit range $\\left[ 4, 16\\right]$, corresponding to the closest value to the thermodynamic limit at the considered system size, gives $\\frac{s}{\\theta^2}=0.03683$, almost one and a half times the value in the thermodynamic limit. To conclude, we can extract the coefficient as a function of system size, $s(\\theta,L)$, with the optimal fitting range strategy and then extrapolate to the thermodynamic limit: $\\frac{s(\\theta \\rightarrow 0, L \\rightarrow \\infty)}{\\theta^2} = \\frac{N_f}{(4\\pi)^2}$. As shown in Fig.~\\ref{fig:diso_ope}, the optimal value is always the minimum among all possible fit ranges. This criterion is less ambiguous than the above-mentioned condition $1 \\ll f_{low} < f_{up} \\ll L$. Hence, one may expect $s(\\theta,L)$ to gradually approach $s(\\theta \\rightarrow 0,\\infty)$ as $L$ increases.\n\n\\subsection{Comparison between the $\\pi$-flux model and honeycomb lattice implementation}\n\\label{sec:secVII}\n\n\\begin{figure}[htp!]\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_L900_honey.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_rob_L900_honey.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n\t\t\\includegraphics[width=\\columnwidth]{diso_fit_L24_honey.pdf}\n\t\\end{minipage}\n\t\\begin{minipage}[htbp]{0.49\\columnwidth}\n\t\t\\centering\n \\includegraphics[width=\\columnwidth]{diso_fit_rob_L24_honey.pdf}\n\t\\end{minipage}\n\t\\caption{The scaling behavior of the disorder operator on the honeycomb lattice at small angle $\\theta=0.1$, and the choice of fitting \n\t range used to determine the coefficient $s$. (a,b) for $L = 900$, (c,d) for $L=24$, comparable with the size of the interacting system. 
(a,c) plot the original disorder operator data versus perimeter $l$; from $-\\ln X$ alone it seems hard to extract the log correction. \n\t (b,d) show $s(\\theta)/\\theta^2$ versus $f_{up}$ for various fit ranges $l \\in \\left[ f_{low}, f_{up}\\right]$. The analytic value in the thermodynamic limit, $\\frac{N_f}{(4\\pi)^2}$, is plotted as a black solid line. We find that \n\tif the lower bound $f_{low}$ is too small, e.g. the blue line in (b), the fit results will overestimate the value. In contrast, when the upper boundary $f_{up}$ gets larger, the fit results gradually deviate\n\tfrom the analytic value. Thus we conclude that the proper choice to obtain $s(\\theta)/\\theta^2 = \\frac{N_f}{(4\\pi)^2}$ is $1 \\ll f_{low} < f_{up} \\ll L$. For the small size $L=24$ in (d), we find that this condition is hard to satisfy, thus leading to overestimated values of $s$. }\n\t\\label{fig:diso_ope2}\n\\end{figure}\n\nAs for the $\\pi$-flux model, free fermions on the honeycomb lattice form Dirac cones, located at the $(\\frac{2\\pi}{3},\\frac{2\\pi}{3\\sqrt{3}})$ and $(-\\frac{2\\pi}{3},\\frac{2\\pi}{3\\sqrt{3}})$ points in the Brillouin zone. This corresponds to \n$C_{J,free}=2$ and $N_f=4$. We analytically solve the density correlation function to obtain: \n\\begin{equation}\n\tD_h(\\mathbf{r})=\\langle \\hat n_{\\mathbf{r}_i} \\hat n_{\\mathbf{r}_j} \\rangle - \\langle \\hat n_{\\mathbf{r}_i} \\rangle \\langle \\hat n_{\\mathbf{r}_j} \\rangle \\sim \\frac{N_{F}}{(4 \\pi)^{2}r^{4}}\n\t\\label{eq:free4}\n\\end{equation}\n$D_h(\\mathbf{r})$ has a similar form to $D_{\\pi}(\\mathbf{r})$, aside from the oscillation term shown in Fig.~\\ref{fig:den_corr}, indicating that both \nlead to the same scaling behavior of the disorder operator, $al + b\\ln l +c$, when carrying out the integral of the density fluctuations over $M$. \nIt must be emphasized that here the region $M$ is a parallelogram, whose corner angles are $60^{\\circ}, 60^{\\circ}, 120^{\\circ}, 120^{\\circ}$. Here, we use the conclusions in Ref.~\\cite{wu2021universal}, in which the contribution by corners is described by $s \\propto \\sum_{\\alpha} f(\\alpha)=\\sum_{\\alpha} 2(1+(\\pi-\\alpha)\\cot(\\alpha))$, where the summation runs over the interior angles of the region $M$. Thus, we obtain a modification factor due to the geometry of the parallelogram-shaped region $M$, relative to the square, of $A \\approx 1.30$. Hence we have: \n\\begin{equation}\n\t\\frac{\\ln(|X(\\theta)|)}{\\theta^2} = -a_1 l + \\frac{AN_f}{(4 \\pi)^2} \\ln l + a_0\n\t\\label{eq:free5}\n\\end{equation} \nIn Fig.~\\ref{fig:diso_ope2} we carry out a similar analysis as for the square lattice, Fig.~\\ref{fig:diso_ope}. We find $\\frac{s(\\theta \\rightarrow 0, L=24)}{\\theta^2} = 0.04065$, $\\frac{s(\\theta \\rightarrow 0, L=900)}{\\theta^2} = 0.03309$, and $\\frac{s(\\theta \\rightarrow 0, L\\rightarrow\\infty)}{\\theta^2} = 0.03299$. The differences for $L=900$ and $L=24$ compared with the thermodynamic limit value are $0.3 \\%$ and $23\\%$, respectively.\n\nThis result can be applied to the 2d DQCP model at $\\lambda < \\lambda_{c1}$, where, in the thermodynamic limit, with $A \\approx 1.3$ and $N_f=8$, one expects $\\alpha = \\frac{AN_f}{(4\\pi)^2} \\approx 0.0658$.\n\n\n\n\\subsection{$C_J$ at Gross-Neveu QCP}\n\nIn this subsection, we discuss the results at the Gross-Neveu QCP and compare the disorder operator at the \n Gross-Neveu-Ising and Gross-Neveu-Heisenberg transitions. 
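As a preliminary check of the geometric factor entering below, note that the value $A\\approx 1.30$ quoted above follows from an explicit evaluation of the corner function of Ref.~\\cite{wu2021universal}, assuming that the corner contributions simply add:\n\\begin{equation*}\nA=\\frac{2f(\\pi/3)+2f(2\\pi/3)}{4f(\\pi/2)}=\\frac{8+\\frac{4\\pi}{3\\sqrt{3}}}{8}=1+\\frac{\\pi}{6\\sqrt{3}}\\simeq 1.302 .\n\\end{equation*}\n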
\n As mentioned in the main text, the disorder operator at small $\\theta$ also obeys the expression of Eq.~(1) in the main text, where the coefficient of the $\\ln l$ term, $s(\\theta) \\sim \\alpha \\theta^2$, is related to the current central charge $C_J$ of the corresponding \n CFT. Theoretically, one expects $\\alpha = \\frac{AN_{\\sigma}C_J}{8\\pi^2}$ at the Gross-Neveu QCP, where the angle modification coefficient is $A = 1$ for the $(90^{\\circ},90^{\\circ},90^{\\circ},90^{\\circ})$ square region and $A \\approx 1.30$ for the $(60^{\\circ},60^{\\circ},120^{\\circ},120^{\\circ})$ parallelogram region. We aim to calculate $C_J$ for the Gross-Neveu Ising and Heisenberg QCPs. \n However, the system size in the interacting case is limited to $L = 20$ for the $\\pi$-flux lattice and $L=24$ for the honeycomb lattice, which leads to deviations from the analytic value when fitting, as shown in Fig.~\\ref{fig:diso_ope}(d) and Fig.~\\ref{fig:diso_ope2}(d). This directly indicates that the choice of fit range has a non-negligible influence on the value of $\\alpha$. \n Besides, the raw data in the interacting case carry error bars, in contrast to the non-interacting case, as shown in Fig.~\\ref{fig:diso_ope} and Fig.~\\ref{fig:diso_ope2}. Thus, it is difficult to fit the disorder operator to obtain $C_J$. Here, based on the knowledge that the disorder operator is the integral over the density correlation function $D(\\mathbf{r})$, we directly calculate $D(\\mathbf{r})$ in DQMC simulations and compare with the result obtained \n by fitting the disorder operator. We display the calculations for the interacting Hamiltonian of Eq.~\\eqref{eq:Ham_Int}, corresponding to $A=1$ and Gross-Neveu-Ising universality. Fig.~\\ref{fig:intdirac} shows the original data for $D(\\mathbf{r})$ on a $\\ln$-$\\ln$ scale for our largest system size. At first sight, $D(\\mathbf{r})$ at $h_c$ still obeys the $\\frac{1}{r^4}$ relation and differs only slightly from \n the free results. To investigate the difference carefully, we plot the ratio between $D(\\mathbf{r})$ and $D_{free}(\\mathbf{r})$ for various system sizes. At small $r$, the ratio seems to be independent of $L$, and gradually decreases. We regard this as an effect of the lattice microscopic details, which is not universal. Conversely, at large $r$, the ratio exceeds 1 as a result of the finite-size effect, as $r$ becomes comparable to $L$. At moderate $r$, we find a minimum for each $L$, and we expect that a plateau corresponding to the minimum will appear at large $L$. We thus plot the so-called extrapolation line in pink, construct the expected plateau, and finally estimate the value $\\frac{D}{D_{free}} \\sim 0.77$ in the thermodynamic limit.\n\n\\begin{figure}[htp!]\n\t\\centering\n\t\\includegraphics[width=\\columnwidth]{den_int_dirac.pdf}\n\t\\caption{Density correlation function $D(\\mathbf{r})$ versus $\\mathbf{r}$ for the Gross-Neveu Ising QCP. We directly plot $D(\\mathbf{r})$ along the diagonal, i.e. $\\mathbf{r}_x=\\mathbf{r}_y$, for our largest system size, and compare with the free case $D_{free}$, plotted in grey on a $\\ln$-$\\ln$ scale. Obviously both follow the $\\frac{1}{r^4}$ behavior. The difference on this scale is small and hard to distinguish. To obtain $C_J$, $\\frac{D}{D_{free}}$ is displayed in the inset for various values of $L$. We expect that at $1 \\ll r \\ll L$, $\\frac{D}{D_{free}}$ converges to the thermodynamic limit. 
Limited by the system size, we use the thick grey line to depict the value extrapolated from the existing data. We fit several data points along the envelope with the form $y=c+a e^{-bx}$. We estimate $\\frac{D}{D_{free}} \\equiv c \\sim 0.77$ in the thermodynamic limit.}\n\t\\label{fig:intdirac}\n\\end{figure}\n\n\n\\section{1d DQCP model}\n\\label{sec:1ddqcp}\n\nIn this section, we provide data on the convergence check of the DMRG simulations.\n\n\\begin{figure}[htp!]\n\t\\centering\n\t\\includegraphics[width=\\columnwidth]{Fig_DMRGConvergence.pdf}\n\t\\caption{ We consider the $L=160$ 1D DQCP model at the DQCP point $J_z=J_c=1.4645$ \n\twith periodic boundary conditions (PBC). \n\t(a, b) show the entanglement (von Neumann) entropy $S_\\mathrm{vN}$ and \n\t(c, d) the logarithmic disorder operator \n\t$-\\ln|X_M|$ for the DMRG ground states at different bond dimensions \n\t$D=256, 512, 1024$. \n\t$L_M$ corresponds to the subsystem size and $\\tilde L_M = \\frac{L}{\\pi}\\sin\\frac{\\pi L_M}{L}$ \n\tis the conformal distance. \n\tThe color code for the different bond dimensions $D$ is indicated in both \n\tpanels (a) and (c). \n\tFrom (b) and (d), the central charge $c=0.9991$ and the Luttinger parameter \n\t$g=1.3733$ are extracted from the rather straight line composed of the \n\tlargest 40 data points (as indicated by the red solid line). \n\t }\n\t\\label{fig:dmrgconv}\n\\end{figure}\n\nThroughout our DMRG simulations for different system sizes $L=64, 96, 128, 160$, \nwe keep up to 1024 bond states, which renders a small truncation error of \n$\\delta \\rho < 5\\times10^{-9}$. In this section, we show that for our largest system size \n$L=160$ and at the critical point $J_z=J_c=1.4645$, \nthe data presented in the main text are well converged.\n\nIn Fig.~\\ref{fig:dmrgconv} (a) and (b), we show the entanglement (von Neumann) entropy \n$S_\\mathrm{vN}=-\\mathrm{tr}(\\rho_M \\ln \\rho_M)$ as functions of the subsystem size $L_M$\n and the corresponding conformal distance $\\tilde L_M = \\frac{L}{\\pi}\\sin\\frac{\\pi L_M}{L}$. \n Here $\\rho_M = \\mathrm{tr}_{\\bar M} |\\psi\\rangle\\langle\\psi|$ is the reduced density matrix \n obtained by tracing out the degrees of freedom outside subsystem $M$. \n From panel (a) we can see that $S_\\mathrm{vN}$ is well converged for $D\\geq 512$.\n We then plot the $D=1024$ data versus the logarithmic conformal distance $\\ln{\\tilde L_M}$\n following the expected CFT behaviour $S_\\mathrm{vN} =\\tfrac{c}{3} \\ln{\\tilde L_M}$, \n from which $c=0.9991$ is extracted. \n \nFor the disorder operator $X_M = \\prod_{i\\in M} \\sigma^z_i$, we repeat a similar analysis \nin Fig.~\\ref{fig:dmrgconv}(c) and (d). Again, the logarithmic disorder operator \n$-\\ln{|X_M|}$ is well converged for $D\\geq512$. \nFrom $-\\ln{|X_M|} = \\tfrac{g}{8} \\ln{\\tilde L_M}$, $g=1.3733$ is extracted at the 1D DQCP point.\n\n\\clearpage\n\n\n\n\n"}
This has, for instance, been recalled very recently in the context of particle physics in \\cite{Belitsky:2022gba,Dubovyk:2022obc}. Focusing on the latter domain, it is fair to say that the community of high energy physics has been particularly involved in the study of MB integrals and, during the last two decades, in the development of softwares dedicated to their application to the calculations of Feynman integrals (see \\cite{Smirnov:2012gma,Dubovyk:2022obc} and references therein).\nIndeed in the phenomenology of particle physics, the computational need is so huge and the complexity of the calculations so high that it is often impossible to avoid computers for the calculations. MB integrals have not been an exception to this rule and powerful softwares are now available to ease their use \\cite{MBtools}.\n\nAlthough MB integrals have been widely studied, the problem of their analytic and numerical evaluation is still an active field of research, in particular when these integrals are multifold. In \\cite{Ananthanarayan:2020fhl}, an important progress has been achieved in this context, where the first systematic method of computing multifold Mellin-Barnes (MB) integrals analytically, in a non-iterative way, has been presented, along with the powerful and user-friendly \\textit{Mathematica} package \\texttt{MBConicHulls.wl} allowing applications of this technique in an automatic way. From this approach, one obtains series representations of multiple MB integrals by computing the latter using multidimensional residue theory. In general, these series representations have the form of linear combinations of multivariable hypergeometric series (and/or derivatives of such series with respect to their parameters). These representations are useful in various fields of physics and mathematics, such as in quantum field theory (for the computation of Feynman integrals, as mentioned above) or in the theory of multivariable hypergeometric functions (for the study of their transformation theory). \nOne strength of the approach of \\cite{Ananthanarayan:2020fhl} is that, in the common situation where several series representations of the object under study, all being convergent\\footnote{When all series representations are converging, this is denoted as a degenerate case \\cite{Ananthanarayan:2020fhl}.}, coexist, it bypasses one major difficulty met in other calculational approaches such as, in physics, the negative dimension approach \\cite{Halliday:1987an,dunne1987negative} (see also \\cite{DelDuca:2009ac}), the MB approach developed in \\cite{Sasiela}, the method of brackets \\cite{Gonzalez:2010uz} or other more recent techniques, such as the one developed in \\cite{Loebbert:2019vcj} based on Yangian symmetry. All these methods need a detailed convergence analysis of the complete set of series involved in the calculation, which can be more than thousands in non-trivial cases \\cite{Loebbert:2019vcj,Ananthanarayan:2020ncn}. In contrast, in \\cite{Ananthanarayan:2020fhl} the series representations are not obtained from a convergence analysis, but from a simple geometrical approach based on the study of specific intersections of conic hulls associated with the MB integral. This allows one to derive the series representations in complicated cases with many variables where the other methods above fail. 
Another important advantage of the approach of \\cite{Ananthanarayan:2020fhl}, compared to all other methods, is that, in the case where one is interested in the convergence region of a given series representation, one can focus on a single master series and not on all the series that form the series representation (because from the conic hull approach one master series can be obtained for each of the various series representations). This considerably simplifies the convergence analysis.\n\n \n\nThe first applications of the computational method of \\cite{Ananthanarayan:2020fhl} have been published in \\cite{Ananthanarayan:2020ncn,Ananthanarayan:2020xpd} and \\cite{Ananthanarayan:2020fhl}, where complicated conformal Feynman integrals have been evaluated analytically for the first time, and in \\cite{Friot:2022dme}, where it was shown, on the example of Srivastava's triple hypergeometric function $H_C$, that this method can be a powerful tool for the derivation of linear transformations of multivariable hypergeometric functions. In all these calculations, the contours of integrations of the involved MB integrals are such that they do not ``split'' the sets of poles of each of the gamma functions that belong to the numerator of the MB integrand in subsets, because this is the way the fundamental objects under study, \\textit{i.e.} (dimensionally regularized) Feynman integrals and hypergeometric functions, are well-defined in terms of MB integrals. This condition, in general, forces the contours to be non-straight.\n\nIn this paper, we consider the different situation where the contours of the multifold MB integrals can be any straight lines parallel to the imaginary axes in the complex planes of the integration variables (these lines, obviously, avoid the poles of the MB integrand). In mathematics, this is a problem of general interest. In quantum field theory, this kind of MB integrals appear when one computes the $\\epsilon$-expansion of dimensionally regularized Feynman integrals following the MB representation approach summarized in Chapter 5 of \\cite{Smirnov:2012gma}. Indeed, in this approach, one performs the $\\epsilon$-expansion at the MB integral level, which asks to resolve the problem of $\\epsilon$-singularities following two main strategies (called A and B in the literature \\cite{Smirnov:2012gma,Smirnov:2009up}), both leading to multifold MB integrals with straight contours having the shape described above.\n\nMB integrals with straight contours generally have the sets of poles of some or all the gamma functions of the numerator of their integrand split in subsets by the contours. This prevents from directly applying the method of \\cite{Ananthanarayan:2020fhl} to the computation of such integrals: it is first necessary to perform some transformations of the MB integrand, as we show in the next section. Therefore, in order to deal with these cases, we have adapted the \\texttt{MBConicHulls.wl} package (which can be downloaded from \\cite{git}) by adding an option in the code allowing the user to define straight contours of integration. 
Once the straight contours are specified by the user, the package performs the corresponding necessary transformations automatically and the results of the computation of the multifold MB integral can then be derived in an automatic way, as done with the original version of the package presented in \\cite{Ananthanarayan:2020fhl}.\n\nWe explain these considerations in detail in Section \\ref{section1}, on the simple example of a 2-fold MB integral where the calculations of the transformations are done by hand, whereafter we present the corresponding syntax that has to be used when using the new version of our package for an automatic treatment of the same calculations. In Section \\ref{section2} we show a non-trivial application of our method by computing the $\\epsilon$-expansion of the dimensionally regularized massless one-loop pentagon integral in general kinematics and $D=4-2\\epsilon$. This calculation involves several MB integrals, up to 4-fold, with straight contours, and we show how one can easily obtain different series representations of the pentagon from these integrals. To our knowledge, these results have not been previously published in the literature. An alternative computational approach of multifold MB integrals with straight contours has been developed in \\cite{Ochman:2015fho} and automatized in the \\texttt{MBsums.m} \\textit{Mathematica} package. It is based on an iterative approach: the MB integrals are evaluated sequentially. In contrast, our \\texttt{MBConicHulls.wl} package computes multifold MB integrals in a non-iterative way. As one will see, the pentagon example of Section \\ref{section2} gives us the opportunity to compare these two different approaches and packages. \nThe conclusions of our paper then follow.\n\n\n\n\n\n\\section{$N$-fold MB integrals with straight contours\\label{section1}}\n\n\nThe general form of the $N$-fold MB integrals with straight contours that we consider in this work is\n\\begin{align} \\label{N_MB}\n I &(x_1,x_2,\\cdots ,x_N) = \\int\\limits_{c_1-i \\infty}^{c_1+i \\infty} \\frac{ \\text{d} z_1}{2 \\pi i} \\cdots \\int\\limits_{c_N-i \\infty}^{c_N+i \\infty}\\frac{ \\text{d} z_N}{2 \\pi i}\\,\\, x^{z_1}_{1} \\cdots x^{z_N}_{N} \\frac{\\prod\\limits_{i=1}^{k} \\Gamma^{a_i}({\\bf e}_i\\cdot{\\bf z}+g_i)}{\\prod\\limits_{j=1}^{l} \\Gamma^{b_j}({\\bf f}_j\\cdot{\\bf z}+h_j)}\n\\end{align}\nwhere $a_i , b_j, k, l$, $N$ are positive integers, ${\\bf z}=(z_1, \\cdots, z_N)$, ${\\bf e}_i$ and ${\\bf f}_j$ are $N$-dimensional real vectors while $g_i$, $h_j$ and the variables $x_1 , \\cdots , x_N$ can be complex. \nThe integration contours are such that $\\Re(z_i)=c_i$ for $i=1,...,N$, \\textit{i.e.} they form straight lines parallel to the imaginary axes in each of the $z_i$ complex planes. We restrict our discussion in this paper to the case where the vector ${\\bf\\Delta}\\doteq\\sum_ia_i{\\bf e}_i-\\sum_jb_j{\\bf f}_j={\\bf 0}$. This is the degenerate case that we mentioned in the introduction and which, to our knowledge, includes the class of MB representations appearing in Feynman integral calculus. 
The non-degenerate case is presently under study.\n\nAs mentioned in the introduction, in the original computational approach of multifold MB integrals presented in \\cite{Ananthanarayan:2020fhl} (we do not recall this approach here and refer the reader to \\cite{Ananthanarayan:2020fhl} and to \\cite{Ananthanarayan:2020xpd} for technical details), it is assumed that the contours do not split, for each of the gamma functions of the numerator of the MB integrand, their set of poles in different subsets. \nAn equivalent way to formulate this assumption, in the straight contour case described above, is that the real part of the arguments of each of the gamma functions of the numerator of the MB integrand must be positive for any values of the integration variables running on the contours (this, in passing, is a necessary requirement to derive well-defined MB representations with straight contours for Feynman integrals \\cite{Tausk:1999vh,Anastasiou:2005cb}).\nObviously, when computing multiple MB integrals with straight contours as given in Eq.(\\ref{N_MB}), this requirement is in general not satisfied, hence one cannot directly apply the method of \\cite{Ananthanarayan:2020fhl} for the computation of these integrals. It is first necessary to transform those gamma functions that do not have their arguments with positive real parts, in such a way that, for the chosen straight contours, they satisfy this condition. This can be done using the generalized Euler reflection formula, as we show here on a simple example.\n\nLet us consider the following 2-fold MB integral\n\\begin{align} \\label{2_MB}\n I &(x_1,x_2) = \\int\\limits_{c_1-i \\infty}^{c_1+i \\infty} \\frac{ \\text{d} z_1}{2 \\pi i} \\int\\limits_{c_2-i \\infty}^{c_2+i \\infty}\\frac{ \\text{d} z_2}{2 \\pi i}\\,\\, (-x_1)^{z_1} (-x_2)^{z_2} \\Gamma(-z_1)\\Gamma(-z_2) \\frac{ \\Gamma(\\frac{3}{7}+z_1+z_2)\\Gamma(\\frac{2}{3}+z_1)\\Gamma(\\frac{3}{5}+z_2)}{\\Gamma(\\frac{1}{2}+z_1+z_2)} \n\\end{align}\nIf one chooses the contours of integration such that all the five gamma functions in the numerator of the MB integrand satisfy the positivity constraint of the real part of their respective argument for any $z_1$ and $z_2$ running on the contours, for instance by fixing $c_1=-\\frac{1}{7}$ and $c_2=-\\frac{1}{9}$, then, up to an overall factor, one recognizes the MB representation of the Appell $F_1$ function\n\\begin{align} \\label{2_MB_F1}\n I &(x_1,x_2) = \\frac{ \\Gamma(\\frac{3}{7})\\Gamma(\\frac{2}{3})\\Gamma(\\frac{3}{5})}{\\Gamma(\\frac{1}{2})} F_1\\left(\\frac{3}{7},\\frac{2}{3},\\frac{3}{5};\\frac{1}{2};x_1,x_2\\right)\n\\end{align}\nFor this choice of straight contours the sets of poles of each of the gamma functions in the MB integrand are not split in different subsets. One can see this fact by looking at Fig. \\ref{Singular} \\textit{Left} where the red point $(c_1,c_2)=(-\\frac{1}{7},-\\frac{1}{9})$ is surrounded by the singular lines of the five gamma functions of the numerator of the MB integrand in a particular way, whose visualization we have tried to ease by giving an identical color to all the singular lines of a given gamma function. Indeed, it is clear from the picture that the point $(-\\frac{1}{7},-\\frac{1}{9})$ is not located between two singular lines of the same color. This is what is meant when we say that, for each of the gamma functions in the numerator of the MB integrand, the corresponding set of poles is not split in subsets by the contours. 
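As a quick arithmetic check of this choice of contours, the real parts of the arguments of the five gamma functions in the numerator of Eq.(\\ref{2_MB}) at $(c_1,c_2)=(-\\frac{1}{7},-\\frac{1}{9})$ are\n\\begin{equation*}\n-c_1=\\tfrac{1}{7},\\qquad -c_2=\\tfrac{1}{9},\\qquad \\tfrac{3}{7}+c_1+c_2=\\tfrac{11}{63},\\qquad \\tfrac{2}{3}+c_1=\\tfrac{11}{21},\\qquad \\tfrac{3}{5}+c_2=\\tfrac{22}{45},\n\\end{equation*}\nall of which are indeed positive.\n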
Therefore, the result of Eq.(\\ref{2_MB_F1}) can be directly checked using the original version of our \\texttt{MBConicHulls.wl} \\textit{Mathematica} package (\\textit{i.e.} without explicitly fixing the contours), as the package is based on this assumption. Doing this exercise, one obtains from the package that there are 8 different conic hulls associated to this MB integral and that these lead to 5 different series representations. The simplest of the latter is the first one, obtained using the \\texttt{MBResolve[,1]} and \\texttt{EvaluateSeries[,1]} commands of the package. It gives the well-known double series representation of $F_1$ and its overall factor written in Eq.(\\ref{2_MB_F1}) above.\n\\begin{figure}[h]\n\\centering\n\\includegraphics[width=7cm, height=7cm]{Singular1.pdf}\n\\hspace{1cm}\n\\includegraphics[width=7cm, height=7cm]{Singular3.pdf}\n\\caption{Singular structure, in the $(\\Re(z_1),\\Re(z_2))$-plane, of the integrand of \\textit{Left}: Eq.(\\ref{2_MB}) and \\textit{Right}: Eq.(\\ref{2_MB2}). All the poles (represented as singular lines in the figures) of a given gamma function of the numerator of the corresponding MB integrands are plotted with the same color (for instance, the poles of $ \\Gamma(\\frac{3}{7}+z_1+z_2)$ are the oblique lines shown in light blue). In the \\textit{Right} figure, $\\Gamma(-2+z_1)$ (resp. $\\Gamma(-\\frac{3}{5}-z_2)$) has only 3 (resp. 1) singular lines, as the others are cancelled by the denominator of the MB integrand. The red point is $(c_1,c_2)=(-\\frac{1}{7},-\\frac{1}{9})$ and the blue one is $(c_1,c_2)=(\\frac{7}{3},-\\frac{3}{2})$.\\label{Singular}}\n\\end{figure}\n\nWe now want to compute the MB integral of Eq.(\\ref{2_MB}) in the less trivial situation where $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$.\n\nIn this case, the first and fifth gamma functions of the numerator of the MB integrand in Eq.(\\ref{2_MB}) have arguments with negative real parts. Therefore, their sets of poles are now split by the contours, as can be seen in Fig. \\ref{Singular} \\textit{Left} (the point $(c_1,c_2)=(\\frac{7}{3},-\\frac{3}{2})$ is located between singular lines having the same color: two are yellow and two are green). Therefore, in order to apply our package for the evaluation of the corresponding MB integral, we have to find a way to transform these gamma functions in order that the real part of their arguments become positive for $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$. 
This can be done at the cost of introducing more gamma functions in the integrand, by using the generalized reflection formula:\n\\begin{align}\n\\Gamma(z-n)=\\frac{\\Gamma(z)\\Gamma(1-z)(-1)^n}{\\Gamma(n+1-z)}\\label{reflection}\n\\end{align}\nIndeed, rewriting the first gamma function as\n\\begin{align}\\label{firstReflection}\n\\Gamma(-z_1)=\\frac{\\Gamma(3-z_1)\\Gamma(-2+z_1)(-1)^3}{\\Gamma(1+z_1)}\n\\end{align}\nand the fifth one as \n\\begin{align}\\label{secondReflection}\n\\Gamma\\left(\\frac{3}{5}+z_2\\right)=-\\frac{\\Gamma(-\\frac{3}{5}-z_2)\\Gamma(\\frac{8}{5}+z_2)}{\\Gamma(\\frac{2}{5}-z_2)}\n\\end{align}\none sees that for $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$ both gamma functions in the numerator of the RHS of Eq.(\\ref{firstReflection}) and Eq.(\\ref{secondReflection}) will now have arguments with positive real parts (in contrary with the LHS).\n\nThis allows us to write Eq.(\\ref{2_MB}) in the following equivalent way\n\\begin{align} \\label{2_MB2}\n I (x_1,x_2) = \\int\\limits_{c_1-i \\infty}^{c_1+i \\infty} \\frac{ \\text{d} z_1}{2 \\pi i} \\int\\limits_{c_2-i \\infty}^{c_2+i \\infty}\\frac{ \\text{d} z_2}{2 \\pi i}\\,\\, (-x_1)^{z_1} &(-x_2)^{z_2} \\frac{\\Gamma(3-z_1)\\Gamma(-2+z_1)}{\\Gamma(1+z_1)}\\Gamma(-z_2) \\nonumber\\\\\n &\\times \\frac{ \\Gamma(\\frac{3}{7}+z_1+z_2)\\Gamma(\\frac{2}{3}+z_1)}{\\Gamma(\\frac{1}{2}+z_1+z_2)}\\frac{\\Gamma(-\\frac{3}{5}-z_2)\\Gamma(\\frac{8}{5}+z_2)}{\\Gamma(\\frac{2}{5}-z_2)} \n\\end{align}\nwhere now all the gamma functions in the numerator have arguments with positive real parts for the chosen contours $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$. \n\nAs, for $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$, this MB integral is now satisfying the main constraint for the use of the original version of our \\texttt{MBConicHulls.wl} package (see Fig. \\ref{Singular} \\textit{Right} where the point $(c_1,c_2)=(\\frac{7}{3},-\\frac{3}{2})$ is not anymore located between singular lines having the same color), it can be computed with the latter (\\textit{i.e} without specifying the contours of integrations). However, there are now 15 different conic hulls, which lead to 5 possible series representations for the MB integral of Eq.(\\ref{2_MB2}) and the package gives, for the first series representation, the following result:\n\\begin{align}\nI(x_1,x_2)\\underset{\\vert x_1\\vert<1\\wedge\\vert x_2\\vert<1}=&(-x_1)^3\\sum_{n_1=0,n_2=0}^{\\infty}\\frac{\\Gamma(\\frac{11}{3}+n_1)\\Gamma(-\\frac{3}{5}-n_2)\\Gamma(\\frac{8}{5}+n_2)\\Gamma(\\frac{24}{7}+n_1+n_2)}{\\Gamma(4+n_1)\\Gamma(\\frac{2}{5}-n_2)\\Gamma(\\frac{7}{2}+n_1+n_2)}x_1^{n_1}\\frac{x_2^{n_2}}{n_2!}\\nonumber\\\\\n&+(-x_1)^3(-x_2)^{-\\frac{3}{5}}\\Gamma\\left(\\frac{3}{5}\\right)\\sum_{n_1=0}^{\\infty}\\frac{\\Gamma(\\frac{11}{3}+n_1)\\Gamma(\\frac{99}{35}+n_1)}{\\Gamma(4+n_1)\\Gamma(\\frac{29}{10}+n_1)}x_1^{n_1}\\label{result}\n\\end{align}\nOne can check that this result is correct by directly computing Eq.(\\ref{2_MB}) with $c_1=\\frac{7}{3}$ and $c_2=-\\frac{3}{2}$ using the computational approach of \\cite{Friot:2011ic} (see also \\cite{Passare:1996db,TZ}). Indeed, it can be seen from Fig. \\ref{Cone} that the cone corresponding to this series representation has two different sets of singular points from which Eq.(\\ref{result}) can be reobtained.\n\\begin{figure}[h]\n\\centering\n\\includegraphics[width=8cm, height=8cm]{Cone.pdf}\n\\caption{Cone (shaded area in light blue) associated with the series representation of Eq.(\\ref{2_MB}) computed in the text. 
We refer the reader to \\cite{Friot:2011ic} for details about the derivation of the cone using the dashed black line.\\label{Cone}}\n\\end{figure}\nThe first set is $(z_1,z_2)=(3+n_1,n_2)$ which gives the contribution\n\\begin{align}\nI_1=-(-x_1)^3\\frac{\\Gamma(\\frac{24}{7})\\Gamma( \\frac{11}{3})\\Gamma( \\frac{3}{5})}{\\Gamma( \\frac{7}{2})\\Gamma(4)}{F}{}^{1:2;1}_{1:1;0}\n \\left[\n \\setlength{\\arraycolsep}{0pt\n \\begin{array}{c@{{}:{}}c@{;{}}c}\n \\frac{24}{7} & \\frac{11}{3},1 & \\frac{3}{5}\\\\[1ex]\n \\frac{7}{2} & 4 & -\n \\end{array}\n \\;\\middle|\\;\nx_1, x_2\n \\right]\n\\end{align}\nwhere ${F}{}^{1:2;1}_{1:1;0}$ is a Kamp\\'e de F\\'eriet double hypergeometric series \\cite{KdF,Srivastava}. $I_1$ matches with the first term of Eq.(\\ref{result}) once Eq.(\\ref{reflection}) is used in the latter. The second set is $(z_1,z_2)=(3+n_1,-\\frac{3}{5})$, whose associated residues give\n\\begin{align}\nI_2=(-x_1)^3(-x_2)^{-\\frac{3}{5}}\\Gamma\\left(\\frac{3}{5}\\right)\\frac{\\Gamma(\\frac{99}{35})\\Gamma(\\frac{11}{3})}{\\Gamma(4)\\Gamma(\\frac{29}{10})}{}_3F_2\\left(\\frac{99}{35},\\frac{11}{3},1;4,\\frac{29}{10};x_1\\right)\n\\end{align}\nwhich is equal to the second term of Eq.(\\ref{result}).\n\nWe have seen above, as a motivating example, how to treat a simple case of multifold MB integral with arbitrary straight contours, by hand. We now show how the same calculations can be performed in an automatic way using the new version of our \\texttt{MBConicHulls.wl} package that we have implemented and which now allows the user to choose, if needed, arbitrary straight contours of integration for multifold MB integrals (details about some of the commands that are used in this calculations can be found in \\cite{Ananthanarayan:2020fhl}).\n\n\nFor this, we first load in a \\textit{Mathematica} notebook the package as follows:\n\\medskip\n\n\\texttt{In[1]:SetDirectory[NotebookDirectory[]];}\n\n\\texttt{In[2]:=< n_3 +n_4} \\Gamma \\left(n_1+n_2+1\\right) \\Gamma \\left(n_1-n_3-n_4\\right) \\Gamma \\left(n_3+n_4+1\\right) \\Gamma \\left(\\epsilon +n_1+n_2-n_4+2\\right)\n\\nonumber \\\\ & \\times\n \\Gamma\n \\left(-\\epsilon -n_1+n_4-1\\right) \\Gamma \\left(-\\epsilon -n_1-n_2+n_3+n_4-1\\right)\n \\frac{(-u_1/u_3)^{n_1} (-u_2/u_3)^{n_2} (-u_4)^{n_3} (-u_3)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_4=& -u_3^{-2-\\epsilon} \\sum\\limits_{n_1 \\leq n_3 +n_4} \n\\frac{\\Gamma \\left(n_1+n_2+1\\right) \\Gamma \\left(n_3+n_4+1\\right) \\Gamma \\left(\\epsilon +n_1+n_2-n_4+2\\right) \\Gamma \\left(-\\epsilon -n_1+n_4-1\\right)}{\\Gamma(1-n_1+n_3+n_4)}\n\\nonumber \\\\ & \\times\n \\Gamma \\left(-\\epsilon -n_1-n_2+n_3+n_4-1\\right) \\bigg(\\psi\\left(-\\epsilon -n_1-n_2+n_3+n_4-1\\right)-\\psi\\left(n_3+1\\right)\n\\nonumber \\\\ & \n +\\psi\\left(n_3+n_4+1\\right)-\\psi\\left(-n_1+n_3+n_4+1\\right)+\\log \\left(u_4\\right)\\bigg)\n \\frac{(u_1/u_3)^{n_1} (-u_2/u_3)^{n_2} u_4^{n_3} u_3^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_5=& u_3^{-2-\\epsilon} \\sum\\limits_{n_3 > n_1 +n_4} \\Gamma \\left(n_1+n_2+1\\right) \\Gamma \\left(-n_1+n_3-n_4\\right) \\Gamma \\left(n_1+n_4+1\\right) \\Gamma \\left(\\epsilon +n_1+n_2-n_3+2\\right)\n\\nonumber \\\\ & \\times\n \\Gamma\\left(-\\epsilon -n_1+n_3-1\\right) \\Gamma \\left(-\\epsilon -n_2+n_4-1\\right)\n \\frac{(-u_1 u_4 /u_3)^{n_1} (-u_2/u_3)^{n_2} (-u_3/u_4)^{n_3} (-u_4)^{n_4}}{n_1! \\, n_2! \\, n_3! 
\\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_6=& u_2^{-1-\\epsilon} u_4^{-1} \\sum\\limits_{n_i=0}^{\\infty}\n\\frac{\\Gamma \\left(n_2+n_3+1\\right) \\Gamma \\left(n_1+n_4+1\\right) \\Gamma \\left(\\epsilon -n_3-n_4+1\\right) \\Gamma \\left(-\\epsilon +n_1+n_3+n_4\\right)}{\\Gamma(2+ n_1+n_2+n_3+n_4)}\n\\nonumber \\\\ & \\times\n \\Gamma \\left(-\\epsilon +n_2+n_3+n_4\\right) \\bigg(\\psi\\left(-\\epsilon +n_2+n_3+n_4\\right)-\\psi\\left(n_2+1\\right) +\\psi\\left(n_2+n_3+1\\right)\n\\nonumber \\\\ & \n -\\psi\\left(n_1+n_2+n_3+n_4+2\\right)+\\log \\left(u_3\\right)-\\log \\left(u_4\\right)\\bigg)\n \\frac{u_1^{n_1} (u_3/u_4)^{n_2} (u_2/u_4)^{n_3} u_2^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_7=& u_2^{-1-\\epsilon} u_3^{-1} \\sum_{n_1 > n_2 +n_4} \\Gamma \\left(n_2+n_3+1\\right) \\Gamma \\left(n_1-n_2-n_4\\right) \\Gamma \\left(n_2+n_4+1\\right) \\Gamma \\left(\\epsilon +n_1-n_2-n_3-n_4+1\\right)\\nonumber \\\\ & \\times\n \\Gamma \\left(-\\epsilon -n_1+n_4-1\\right) \\Gamma \\left(-\\epsilon +n_2+n_3+n_4\\right) \\frac{(-u_1/u_2)^{n_1} (-u_2 u_4/u_3)^{n_2} (-u_2/u_3)^{n_3} (-u_2)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_8=& -u_2^{-1-\\epsilon} u_3^{-1} \\sum\\limits_{n_1 \\leq n_2 +n_4} \n\\frac{\\Gamma \\left(n_2+n_3+1\\right) \\Gamma \\left(n_2+n_4+1\\right) \\Gamma \\left(\\epsilon +n_1-n_2-n_3-n_4+1\\right)}{\\Gamma(1-n_1+n_2+n_4)} \n\\nonumber \\\\ & \\times\n\\Gamma \\left(-\\epsilon\n -n_1+n_4-1\\right) \\Gamma \\left(-\\epsilon +n_2+n_3+n_4\\right) \\bigg(-\\psi\\left(\\epsilon +n_1-n_2-n_3-n_4+1\\right)\n \\nonumber \\\\ &\n +\\psi\\left(-\\epsilon +n_2+n_3+n_4\\right)-\\psi\\left(n_2+1\\right)+\\psi\\left(n_2+n_3+1\\right)+\\psi\\left(n_2+n_4+1\\right)\n \\nonumber \\\\ &\n -\\psi\\left(-n_1+n_2+n_4+1\\right)\n +\\log \\left(u_2\\right)-\\log \\left(u_3\\right)+\\log \\left(u_4\\right)\\bigg)\n \\frac{(u_1/u_2)^{n_1} (u_2 u_4/u_3)^{n_2} (- u_2/u_3)^{n_3} u_2^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_9=& u_2^{-1-\\epsilon} u_3^{-1} \\sum_{\\mathclap{\\substack{n_3 > n_1 +n_4\\\\ 1+n_1+n_2+n_4 > n_3}}} \\Gamma \\left(-n_1+n_3-n_4\\right) \\Gamma \\left(n_1+n_4+1\\right) \\Gamma \\left(n_1+n_2-n_3+n_4+1\\right) \\Gamma \\left(-\\epsilon -n_1+n_3-1\\right)\n\\nonumber \\\\ & \\times\n \\Gamma \\left(\\epsilon -n_2-n_4+1\\right) \\Gamma \\left(-\\epsilon +n_1+n_2+n_4\\right) \\frac{(-u_1 u_4/u_3)^{n_1} (-u_2 /u_3)^{n_2} (-u_3/u_4)^{n_3} (-u_2 u_4/u_3)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_{10}=& u_1^{-1-\\epsilon} u_3^{-1} \\sum_{n_2>n_1+n_3} \\Gamma \\left(-n_1+n_2-n_3\\right) \\Gamma \\left(n_1+n_3+1\\right) \\Gamma \\left(n_2+n_4+1\\right) \\Gamma \\left(-\\epsilon -n_2+n_3-1\\right)\n\\nonumber \\\\ & \\times\n \\Gamma\n \\left(\\epsilon -n_3-n_4+1\\right) \\Gamma \\left(-\\epsilon +n_1+n_3+n_4\\right) \\frac{(-u_2/u_3)^{n_1} (-u_4)^{n_2} (-u_1/u_3)^{n_3} (-u_1)^{n_4}}{n_1! \\, n_2! \\, n_3! 
\\, n_4!} \n\\end{align}\n\n\n\\begin{align}\nS_{11}=& -u_1^{-1-\\epsilon} u_3^{-1} \\sum\\limits_{n_2 \\leq n_1 +n_3} \n\\frac{\\Gamma \\left(n_1+n_3+1\\right) \\Gamma \\left(n_2+n_4+1\\right) \\Gamma \\left(-\\epsilon -n_2+n_3-1\\right)}{\\Gamma(1+n_1-n_2+n_3)} \n\\nonumber \\\\ & \\times\n\\Gamma \\left(\\epsilon -n_3-n_4+1\\right)\n \\Gamma \\left(-\\epsilon +n_1+n_3+n_4\\right) \\bigg(-\\psi\\left(-\\epsilon +n_1+n_3+n_4\\right)+\\psi\\left(n_1+1\\right)\n \\nonumber \\\\ &-\\psi\\left(n_1+n_3+1\\right)+\\psi\n \\left(n_1-n_2+n_3+1\\right)-\\log \\left(u_2\\right)+\\log \\left(u_3\\right)\\bigg)\n \\frac{(u_2/u_3)^{n_1} u_4^{n_2} (u_1/u_3)^{n_3} (-u_1)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_{12}=& (u_1 u_4)^{-1-\\epsilon} u_3^{-1} \\sum_{n_i=0}^{\\infty}\\Gamma \\left(n_1+n_2+1\\right) \\Gamma \\left(\\epsilon -n_2-n_3+1\\right) \\Gamma \\left(-\\epsilon +n_1+n_2+n_3\\right) \\Gamma \\left(\\epsilon\n -n_2-n_4+1\\right)\n\\nonumber \\\\ & \\times\n \\Gamma \\left(-\\epsilon -n_1+n_4-1\\right) \\Gamma \\left(-\\epsilon +n_2+n_3+n_4\\right) \\frac{(-u_2/u_3)^{n_1} (-u_1 u_4/u_3)^{n_2} (-u_1)^{n_3} (-u_4)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_{13}=& u_1^{-1-\\epsilon} u_3^{-1} \\sum_{n_2>n_1+n_3} \\Gamma \\left(-n_1+n_2-n_3\\right) \\Gamma \\left(n_1+n_3+1\\right) \\Gamma \\left(n_1+n_4+1\\right) \\Gamma \\left(-\\epsilon -n_1+n_2-1\\right)\n\\nonumber \\\\ & \\times\n \\Gamma\n \\left(\\epsilon -n_2-n_4+1\\right) \\Gamma \\left(-\\epsilon +n_1+n_3+n_4\\right) \\frac{(-u_2 u_4/u_3)^{n_1} (-u_1/u_2)^{n_2} (-u_2/u_3)^{n_3} (-u_1)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\\begin{align}\nS_{14}=& (u_1 u_2 u_4)^{-1-\\epsilon} u_3^{\\epsilon} \\sum_{n_i=0}^{\\infty}\\Gamma \\left(\\epsilon -n_1-n_3+1\\right) \\Gamma \\left(\\epsilon -n_1-n_4+1\\right) \\Gamma \\left(\\epsilon -n_2-n_4+1\\right) \n\\nonumber \\\\ & \\times\n \\Gamma \\left(-\\epsilon\n +n_1+n_2+n_4\\right)\\Gamma \\left(-\\epsilon +n_1+n_3+n_4\\right) \\Gamma \\left(-2 \\epsilon +n_1+n_2+n_3+n_4-1\\right) \n \\nonumber \\\\ & \\times \\frac{(-u_1 u_4/u_3)^{n_1} (-u_2/u_3)^{n_2} (-u_1)^{n_3} (-u_2 u_4/u_3)^{n_4}}{n_1! \\, n_2! \\, n_3! \\, n_4!} \n\\end{align}\n\n\n"} +{"id": "red-arxiv-12", "source_id": "red-arxiv_12_red-arxiv-12", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\n\n\n\\section{\\label{sec:Introduction}Introduction}\nQuantum computing (QC) promises superior capabilities in solving certain hard computational tasks which classical computers cannot solve in a reasonable time \\cite{harrow2017quantum, nielsen2002quantum}. It has been shown theoretically that quantum computers can be used to factorize large numbers and, hence,\nbreak state-of-the-art public key cryptography systems \\{almost\\} exponentially \\{superpolynomially\\}\nfaster than classical computers~\\cite{shor1994algorithms}, and perform unstructured database searches with quadratic speedup~\\cite{grover1996fast}. 
\nSuch powerful algorithms require a quantum device with many qubits and a deep quantum circuit to carry out meaningful applications.\nKeeping qubits coherent during operation is therefore vital, since they are susceptible to noise and error-prone.\nAs opposed to classical error correction methods dealing mainly with bit-flip errors~\\cite{chang2006introduction}, quantum computers need to account for multiple types of errors, such as internal phase-flip errors, in addition to the bit-flip errors \\cite{gottesman2002introduction, jayashankar2022quantum}.\nConsequently, a small phase-angle error, or more generally a unitary error close to the identity,\nin a qubit could accumulate as multiple iterations take place. \nOne way to address these errors and achieve the goal of a large functioning quantum device is to implement \\emph{quantum error correction} (QEC)~\\cite{shor1995scheme,steane1996error}.\nThe properties of quantum information pose challenges when developing QEC methods. \nThe no-cloning theorem prevents us from directly duplicating an unknown quantum state~\\cite{wooters1982nocloning}, and the earlier works of Shor~\\cite{shor1995scheme} and Steane~\\cite{steane1996error}, \nas well as others \\cite{lidar2013quantum}, proposed ways to overcome this by encoding logical states over multiple physical qubits in certain forms of entanglement.\n\n\n\nMost QEC decoding strategies are specific to a particular code realization. Moreover, it is challenging to implement QEC strategies (including encoding and syndrome measurement) on quantum hardware because it is very difficult to generate a precise model that captures all the dynamics of imperfect quantum hardware. Machine learning algorithms offer a way to circumvent these issues and have great potential to outperform current error correction strategies. Neural networks (NN) have been explored to enable the learning of error correction strategies for NISQ devices. It has been shown that with sufficient training, ML-based QEC can outperform conventional error-correction strategies \\cite{Florian2018}; however, these techniques may not scale well, since the problem size grows exponentially with the number of qubits. \n\nNN-based decoders (also known as neural decoders) for topological codes can help to simplify the complex decoding algorithms that infer errors from the syndrome as the size of quantum systems increases. Recently, stochastic NNs using the Boltzmann machine model have been proposed to realize neural decoders for topological codes. These decoders can accommodate different architectures and noise models. The NN learns decoding strategies by directly accessing the raw data obtained from measurements done on the quantum hardware \\cite{Torlai2017}.\nAnother non-trivial task related to the error syndrome is to predict the probability distribution of the errors for a QEC code. In \\cite{krastanov2017deep}, NNs were deployed to encode this information. \nEven though this method is not scalable due to the probabilistic sampling, it paves the way to use more complex NNs that can provide faster decoding as well as higher threshold values. Preliminary work on faster decoding was explored in \\cite{varsamopoulos2017decoding,bhoumik2021efficient} for the case of a small surface code.\n\nReinforcement learning (RL) is a machine learning method that learns how to make decisions via trial and error. 
Recent advances in deep neural networks (DNN) further empower RL capabilities and have already shown superhuman performance in solving complex sequential decision-making challenges such as playing the game of Go~\\cite{silver2018general}. RL has been applied\nto \nquantum control~\\cite{sivak2022model, bukov2018reinforcement, niu2019universal}, quantum architecture search \\cite{fosel2021quantum, kuo2021quantum}, and quantum error correction \\cite{nautrup2019optimizing, andreasson2019quantum}. However, there is a concerning issue for RL, which is that classical simulation of training an RL agent is computationally expensive\nand time-consuming. Even worse, trained RL agents can only perform well when working on the task specified in their training and they cannot usually be generalized to different tasks even when the tasks are pretty similar~\\cite{cobbe2019quantifying}. This poses a challenge when dealing with a quantum computing environment because the device noise changes over time \\cite{proctor2020detecting}. To address this challenge, we make an addition to the traditional RL method to improve its performance in different quantum noise environments.\n\nIn this paper, we propose a continual RL approach to tackle the decoding problem with changing noise. \nSpecifically, we construct a double deep $Q$-learning network (DDQN) agent equipped with probabilistic policy reuse (PPR) algorithm to improve the learning efficiency via utilizing previously learned knowledge. We show that the proposed framework can significantly reduce the required training episodes compared to training from scratch. In addition, we show that by increasing the number of trained policies in the policy library, the RL training can be better than training it with a smaller policy library.\nThe paper is organized as follows. In \\sectionautorefname{\\ref{sec:QuantumErrorCorrection}}, we provide a background on QEC and the basic idea of surface codes. In \\sectionautorefname{\\ref{sec:DeepReinforcementLearning}}, we introduce the RL methods used in this work. In \\sectionautorefname{\\ref{sec:ProbabilisticPolicyReuse}}, we describe the probabilistic policy reuse (PPR) framework used to extend RL agents. In \\sectionautorefname{\\ref{sec: Method}}, we provide the details of the experimental setup such as the quantum simulation environment and the RL training parameters. We demonstrate the results in \\sectionautorefname{\\ref{sec:ExpAndResults}}. Finally, we discuss the results in \\sectionautorefname{\\ref{sec:Discussion}} and conclude the paper in \\sectionautorefname{\\ref{sec:Conclusion}}.\n\n\n\n\\section{\\label{sec:QuantumErrorCorrection}Quantum Error Correction and Surface Codes} \nThere has been tremendous development in finding physical systems that can be used as qubits to encode quantum information. Some examples include neutral atoms \\cite{Henriet2020quantumcomputing}, NMR-spin qubits \\cite{vandersypen2001experimental}, NV-centers \\cite{chen2019universal}, photonics \\cite{slussarenko2019photonic,knill2001scheme,bartolucci2021fusion}, superconducting qubits \\cite{national2019quantum,rosenblum2018cnot, lardinois2020ibm, meng2021cloud}, trapped ions \\cite{allen2017reconfigurable,bohnet2016quantum}. \n\nHowever, qubits are very sensitive to the noise from their environment, implying that the physical qubits are not able to carry out reliable logical computation and therefore limit the potential application of quantum computing. 
For fault-tolerant quantum computing, many such physical qubits must be combined into logical qubits through \\textit{Quantum Error Correction} (QEC).\n\nQEC uses several physical qubits to make a single \\emph{logical qubit}. In classical error correction, we can simply duplicate the information and use multiple bits to store it. However, in QEC, due to the fundamental limitation imposed by the \\emph{no-cloning theorem}, we cannot copy unknown quantum information, and hence we cannot directly apply the classical approach. Scientists have discovered several ways to entangle the qubits so that the quantum information is distributed over a set of physical qubits such that, when some of the qubits fail, we can still recover the information. \n\nThe basic idea of a QEC scheme is to define how to encode the quantum information with multiple qubits and how to perform \\emph{parity checks} to know what errors actually occur \\cite{devitt2013quantum, lidar2013quantum, roffe2019quantum, gottesman2002introduction, gottesman2010introduction}. After collecting enough information, the corresponding recovery routines can be carried out to correct the quantum state. \nThere are several approaches to implementing QEC codes in a fault-tolerant setting. QEC codes that use four data qubits have been demonstrated \\cite{corcoles2015demonstration, takita2017experimental,linke2017fault,andersen2020repeated}, but they cannot correct the errors they identify.\nClassical repetition codes have also been used to correct errors \\cite{cory1998experimental,chiaverini2004realization,reed2012realization,riste2015detecting,gunther2021improving,Wootton2018,google2021exponential}; however, scaling them to larger quantum systems is still an open research question.\nA leading approach to scalable quantum computing in the NISQ era is to use topological codes \\cite{fowler2012surface}. A prominent topological code is the surface code, as it has an error threshold of $1\\%$ and is compatible with a planar architecture \\cite{Raussendorf2007}.\n\n\\subsection{Stabilizer Formalism}\nMost codes are stabilizer codes; one measures stabilizer operators to obtain error syndromes and uses this information to correct errors. The stabilizer formalism is a mathematical framework to describe QEC schemes and was first introduced by Daniel Gottesman \\cite{gottesman1997stabilizer, gottesman2010introduction}. A quantum state $\\ket{\\psi}$ is defined to be stabilized by some operator $M$ if it is a $+1$ eigenstate of $M$: $M\\ket{\\psi} = \\ket{\\psi}$.\nAn $N$-qubit stabilizer state $\\ket{\\psi}_{N}$ is defined by the $N$ generators of an abelian\nsubgroup $\\mathcal{G}$ of the $N$-qubit Pauli group $\\mathcal{P}_{N}$,\n\\begin{equation}\n\\label{eqn:stabilizer_definition}\n\\mathcal{G}=\\left\\{M^{i} \\colon M^{i}\\ket{\\psi}=\\ket{\\psi},\\ \\left[M^{i}, M^{j}\\right]=0,\\ \\forall (i, j)\\right\\} \\subset \\mathcal{P}_{N}.\n\\end{equation}\nGiven the stabilizer group $\\mathcal{S}$, the \\emph{codespace} is the set of quantum states $\\ket{\\psi}$ which are simultaneous eigenvectors of every $M \\in \\mathcal{S}$ with eigenvalue $+1$. If an error happens, meaning that a Pauli operator $X$ or $Z$ has acted on the quantum state, then some of the parity qubits may report eigenvalue $-1$, indicating that there is an error. 
The measurement results of parity qubits can be used to find out which qubits have errors and corresponding recovery procedures can be applied.\n\\subsection{Surface Code}\n\\begin{figure}[htbp]\n \\centering\n \\scalebox{0.21}{\n \\centering\n \\includegraphics[trim={0 14cm 0 0},clip]{figures/diagrams/SurfaceCode.pdf}\n }\n \\caption{{\\bfseries Surface Code Diagram. } (a) A $d\\times d$ square lattice ($d=5$) surface code diagram. The green (blue) plaquettes represent the independent parity checks for Pauli $X$ ($Z$) flips. Each vertex of the colored stabilizers contains a physical data qubit. (b) The $2 \\times 2$ square lattice figures demonstrate the Pauli flips errors and the effect on the surrounding colored stabilizers. We can see that if there is a $X$ error, then two $Z$ parity checks will be in the eigenvalue $-1$. On the other hand, if there is a $Z$ error, then two $X$ parity checks will be in the eigenvalue $-1$. Since the $Y$ error is the combination of $X$ and $Z$ error, we can see that $Y$ error will make both $X$ and $Z$ parity checks to be $-1$.} \n \\label{Fig:surface}\n\\end{figure}\nVarious QEC frameworks have been developed under the stabilizer formalism. One of the most promising works is the \\emph{surface code} \\cite{bravyi1998quantum, fowler2012surface}. \n\nThe surface code is a version of Kitaev's toric code \\cite{kitaev2003fault, fujii2015quantum}, where the periodic boundary conditions are replaced by open boundary conditions \\cite{bravyi1998quantum,freedman2001projective}. \nIn this work, we consider the surface code used to encode a single qubit. In \\figureautorefname{\\ref{Fig:surface}} we show the $d\\times d$ lattice, where each vertex represents a physical qubit. The independent parity checks for $X$ ($Z$) are represented by the green (blue) plaquettes and are modified at the boundary to act on three qubits.\nIn order to measure the error without destroying the logical qubits, we need to perform projective measurements on ancilla qubits. These types of measurements are known as stabilizer measurements. The outcome of these ancilla qubit measurements constitutes the \\emph{syndrome}, which informs us whether an error has occurred on the logical qubit. \n\n\n\\subsection{\\label{subsec:decoding}Decoding Scheme}\nSyndrome measurements allow for the usage of a decoding scheme to i) process any information about errors that have occurred and ii) correct those errors appropriately. Depending on which error correction code is used, it is possible to obtain information about the existence, location, and type of error by using syndrome measurements. With this information, a decoder can then correct the errors that were detected and have the means to verify its success (through conducting subsequent syndrome measurements).\n\nSince errors occur continuously over time, the decoding scheme can be treated as a process that also occurs continuously. The aim of such a scheme would still be to correct errors, with the added stipulation to do so for as long as possible (thus extending the \\textit{lifetime} of the qubit). This has led to the development of decoders that successfully utilize machine learning as a means to continuously combat errors \\cite{kim2020quantum,convy2022machine}. 
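\n\nTo make the parity-check picture above concrete, the following is a minimal toy sketch (our own illustration, not the surface-code environment used later in this paper): syndrome extraction and lookup-table decoding for a three-qubit bit-flip repetition code. The parity-check matrix, variable names, and lookup rule are illustrative choices only.
\\begin{verbatim}
import numpy as np

# Parity-check matrix of the 3-qubit repetition code
# (rows correspond to the checks Z1 Z2 and Z2 Z3).
H = np.array([[1, 1, 0],
              [0, 1, 1]])

def syndrome(errors):
    """Parity-check outcomes (0 -> eigenvalue +1, 1 -> eigenvalue -1)
    for a binary vector of X (bit-flip) errors on the data qubits."""
    return H @ errors % 2

# Lookup decoder: map each syndrome to the implied single-qubit correction.
LOOKUP = {(0, 0): None, (1, 0): 0, (1, 1): 1, (0, 1): 2}

errors = np.array([0, 1, 0])              # a bit flip on the middle qubit
s = tuple(syndrome(errors))
print(s, "-> flip qubit", LOOKUP[s])      # prints (1, 1) -> flip qubit 1
\\end{verbatim}
The surface code generalizes this picture: each plaquette stabilizer plays the role of one row of the parity-check matrix, and the decoder must infer a correction from many such parity outcomes collected over time.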
\nIn our work, we utilize reinforcement learning to train our agent to act as a decoder for the surface code.\n\\section{\\label{sec:DeepReinforcementLearning}Deep Reinforcement Learning}\n\\emph{Reinforcement learning} (RL) is a learning strategy where the training is based on learning from experience, and building strategies to solve the given problem. For a more detailed discussion on RL, we refer the readers to \\cite{sutton2018reinforcement}. \nGenerally, RL is comprised of two main elements: an \\emph{agent} and an \\emph{environment}. The \\emph{agent} interacts with the \\emph{environment} $\\mathcal{E}$ over a collection of discrete time steps, where $\\mathcal{E}$ contains the necessary information to describe the problem at hand. It contains the rules of the game, the set of all possible states $s \\in S $ (observations), and feedback on the quality of the actions taken by the agent. At any time step $t$, the agent receives a state $s_{t}$ from the $\\mathcal{E}$, and chooses an \\emph{action}, $a_{t} \\in \\mathcal{A}$. This is done with respect to the agent’s \\emph{policy} $\\pi$, which maps $s_t$ to action $a_t$. The \\emph{policy} $\\pi$ is generally, stochastic in nature, such that for a given $s_{t}$, the action output is a probability distribution $\\pi(a_{t}|s_{t})$. Now, an \\emph{episode} in the training is defined as the agent starting with some random initial state $s_{0}$ and interacting with the $\\mathcal{E}$ following the aforementioned process. At any time $t$, after executing the action $a_{t}$, the agent receives the state of the next time step $s_{t+1}$ and a scalar \\emph{reward} $r_{t}$. This process continues until the agent satisfies a pre-defined stopping criterion or a terminal state. \n\nThe reward depends on the state and the action taken. The agent gets a higher reward when it progresses towards the goal and a penalty when performing bad actions. Concretely speaking, the objective of any RL problem is to find an optimal policy $\\pi^{*}$ that maximizes the rewards. The total discounted return from time step $t$ is defined as $R_t = \\sum_{t'=t}^{T} \\gamma^{t'-t} r_{t'}$, where $\\gamma$ is the discount factor that lies in $(0,1]$. Here $\\gamma$ is the parameter that controls how future rewards are weighted to the decision making function. When a large $\\gamma$ is considered, the agent weighs the future reward more heavily. On the other hand, with a small $\\gamma$, the agent weighs the immediate reward more.\nThe expected return for selecting an action $a$ in state $s$ based on policy $\\pi$ is defined as the \\emph{action-value function} or \\emph{$Q$-value function} $Q^\\pi (s,a) = \\mathbb{E}[R_t|s_t = s, a]$. The optimal action value function $Q^*(s,a) = \\max_{\\pi} Q^\\pi(s,a)$ gives a maximal action-value across all possible policies. The value of state $s$ under policy $\\pi$, $V^\\pi(s) = \\mathbb{E}\\left[R_t|s_t = s\\right]$, is the agent's expected return by following policy $\\pi$ from the state $s$. The RL algorithms which maximize the value function are called \\emph{value-based} RL.\n\n\\subsection{\\textit{Q}-Learning}\n$Q$-learning \\cite{sutton2018reinforcement} is one of the most widely used model-free approaches in RL. In $Q$-learning, the agent learns the optimal action-value function and is an \\emph{off-policy} algorithm.\nThe learning begins by arbitrarily initializing the value function $Q^{\\pi}(s,a) \\forall s\\in S, a\\in \\mathcal{A}$, typically stored in a table known as the $Q$-table. 
The estimates for $Q^{\\pi}(s,a)$ are then progressively updated, as the agent gathers experience under its policy, using the Bellman update: \n\\begin{align}\n Q\\left(s_{t}, a_{t}\\right) \\leftarrow Q\\left(s_{t}, a_{t}\\right)\n +\\alpha\\left[r_{t}+\\gamma \\max _{a} Q\\left(s_{t+1}, a\\right)-Q\\left(s_{t}, a_{t}\\right)\\right].\n\\end{align}\n\n\\subsection{\\label{subsec:DDQ}Double Deep \\textit{Q}-Learning}\nAlthough the previously explained method of $Q$-learning gives the optimal action-value function, it is not feasible for problems with large state or action spaces, where the $Q$-table becomes prohibitively large. For example, it would be very difficult to handle problems with high-dimensional states $s$ or actions $a$. In order to get around this memory requirement, neural networks (NN) are used to efficiently represent $Q^{\\pi}(s,a) \\forall s \\in S, a \\in \\mathcal{A}$. This method of using NNs to learn $Q$-values is known as \\emph{deep $Q$-learning} and the network is called a deep $Q$-network (DQN) \\cite{mnih2015human}. \n\nIn order to stabilize the DQN, we use \\emph{experience replay} and an additional network known as the \\emph{target network} \\cite{mnih2015human}. In experience replay, the agent stores the experiences encountered during the episodes in a memory as transition tuples, $\\{s_{t}, a_{t}, r_{t}, s_{t+1}\\}$. After gathering enough experiences, the agent randomly samples a batch of experiences, computes the loss, and updates the DQN parameters. Additionally, in order to reduce the correlation between the target and the prediction, a clone of the DQN, known as the \\emph{target network}, is used. The DQN parameters $\\theta$ are updated at every iteration, while the target network parameters $\\theta^{-}$ are updated only every few iterations. The DQN is trained by minimizing the mean squared error (MSE) loss function:\n\\begin{equation}\n L(\\theta)=\\mathbb{E}\\left[\\left(r_{t}+\\gamma \\max _{a^{\\prime}} Q\\left(s_{t+1}, a^{\\prime} ; \\theta^{-}\\right)-Q\\left(s_{t}, a_{t} ; \\theta\\right)\\right)^{2}\\right]\n\\end{equation}\n\n\nIn this work, we extend this to implement \\emph{Double Deep $Q$-learning}, since the DQN can sometimes overestimate the action-value function \\cite{hasselt2016qlearning}. The idea behind double deep $Q$-learning is to decompose the max operation in the target $y^{DQN}_{t} = r_{t}+\\gamma \\max _{a^{\\prime}} Q\\left(s_{t+1}, a^{\\prime} ; \\theta^{-}\\right)$ into two separate operations: \\emph{action selection} and \\emph{action evaluation}. The action selection is based on the policy network, $\\operatorname{argmax}_{a} Q\\left(s_{t+1}, a ; \\theta\\right)$, and then the target network is used to evaluate the chosen action, $Q\\left(s_{t+1}, \\operatorname{argmax}_{a} Q\\left(s_{t+1}, a ; \\theta\\right), \\theta^{-}\\right)$. The DDQN target is now $y^{DDQN}_{t} = r_{t}+\\gamma Q\\left(s_{t+1}, \\operatorname{argmax}_{a} Q\\left(s_{t+1}, a ; \\theta\\right), \\theta^{-}\\right)$. 
The loss function $L(\\theta)$ is therefore:\n\n\\begin{equation}\n L(\\theta)=\\mathbb{E}\\left[\\left(r_{t}+\\gamma Q\\left(s_{t+1}, \\operatorname{argmax}_{a} Q\\left(s_{t+1}, a ; \\theta\\right), \\theta^{-}\\right) -Q\\left(s_{t}, a_{t} ; \\theta\\right)\\right)^{2}\\right]\n\\end{equation}\n\nThen, $\\theta$ is updated using the gradient descent method and every few iterations we update the target network $\\theta^{-}\\leftarrow\\theta$.\n\\section{\\label{sec:ProbabilisticPolicyReuse}Probabilistic Policy Reuse}\n\\begin{figure}[htbp]\n \\centering\n \\scalebox{0.25}{\n \\centering\n \\includegraphics{figures/diagrams/PPRfig.pdf}\n }\n \\caption{{\\bfseries Overview of Policy Reuse for Decoding.} Shown above is the cyclic process of the RL agent correcting errors on the surface code environment for as long as possible. Within the \\textit{Decoding Process} box, we see that the surface code is constantly being subjected to noise, creating errors on the physical qubits stationed at each point on the surface code lattice. The errors are detected in the form of a syndrome extraction and fed to the RL agent. In the RL Agent box, we see that the agent has access to a policy library that has stored information previously gathered from other noise environments. The RL agent can leverage these policies when considering the best course of action to enact on the current surface code environment under consideration. }\n \\label{Fig:ppr}\n\\end{figure}\nAs mentioned in \\sectionautorefname{\\ref{subsec:decoding}}, errors can occur continuously while the qubit is operating and ML-based decoders can be used to process and correct this stream of errors.\n\\textit{Probabilistic policy reuse (PPR)} is one implementation of \\textit{continual learning}, which is a subset of machine learning specifying the ability to learn from information that is accumulated over time. To do this, a policy reuse algorithm in particular stores and makes use of its previously solved solutions (in the form of policies/models) to aid in its computation of an upcoming task. This method has been previously demonstrated with other architectures such as: utilizing PPR in conjunction with tabular $Q$-learning \\cite{fernandez2006probabilistic} and deep $Q$-networks \\cite{ye2021quantum}. \\emph{Our algorithm will utilize this method with double deep $Q$-learning to conduct quantum error correction on the surface code.}\n\nTo further elaborate on how PPR operates, the previous policies are stored in a policy library, $L$, and sampled probabilistically at the beginning of each learning episode. This process acts outside, but in conjunction with, the DDQN framework discussed previously in \\sectionautorefname{\\ref{subsec:DDQ}} and utilizing the agent without any policies loaded into $L$ can be thought of as training the agent from scratch with just the DDQN. After each episode, a score is calculated to represent how well the policy performed. This score then allows for an update to the probabilistic weights associated with each policy stored in $L$, allowing for the better-performing policies to gain a greater probability of being chosen in the next episode. 
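\n\nBefore writing out the selection rule, here is a rough sketch of this per-episode policy-selection step (a sketch under our own assumptions about how the scores are stored, not the authors' implementation). It samples a policy index from the library with probability proportional to $e^{\\tau W_{j}}$, which is exactly the softmax weighting written out below; the example score values and the temperature schedule are hypothetical.
\\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def select_policy(W, tau):
    """Sample a policy index from the library with probability
    proportional to exp(tau * W_j) (the softmax weighting below)."""
    logits = tau * np.asarray(W, dtype=float)
    logits -= logits.max()                  # for numerical stability
    probs = np.exp(logits) / np.exp(logits).sum()
    return rng.choice(len(probs), p=probs)

# Hypothetical per-policy scores W (e.g. running scores of past episodes)
# and a temperature that grows by delta_tau after every episode.
W = [120.0, 310.0, 450.0]
tau = 0.01 * 500                            # illustrative: delta_tau * episode
chosen = select_policy(W, tau)
\\end{verbatim}
After each episode, the score $W_{j}$ of the policy that was followed would be updated with the episode's outcome, so that better-performing policies become more likely to be reused.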
\n\nAs stated earlier, the policy reuse process acts outside the DDQN, as shown in \\figureautorefname{\\ref{Fig:ppr}}.\nThe policy to be examined for a given episode is sampled from the policy library according to the softmax equation:\n\\begin{equation}\n P(\\Pi_{j}) = \\frac{e^{\\tau W_{j}}}{\\sum_{p = 0}^{n}e^{\\tau W_{p}}}\n \\label{eqn:softmax}\n\\end{equation}\nwhich outputs a probability vector $P$, with each vector component corresponding to the probability assigned to a given policy. $P$ depends on the current reward $W_{j}$ of each policy and on a temperature parameter $\\tau$.\n\nThe aim of incorporating PPR on top of the DDQN is to enhance the performance of the agent by leveraging information from previous solutions rather than starting from scratch when encountering a new task. This is especially relevant when considering how to correct errors such as the bit-flips described in \\sectionautorefname{ \\ref{sec:QuantumErrorCorrection}}, because the algorithm can recall policies that have previously solved similar error configurations to aid in its management of new errors.\n\\section{\\label{sec: Method}Method}\n\\subsection{Environment and Encoding of the agent}\nThe summary of the environment and of the agent's interaction with it in this section is based on the work presented in \\cite{Sweke2020}. For more details, we encourage the readers to refer to this paper.\nFirst, we outline the encoding of the environment state. The environment state $s_{t}$ at time $t$ is constructed by layering the action history slices and faulty syndrome slices together. \nIn the \\emph{environment} module, the $d\\times d$ surface code lattice is encoded into a $(2d+1) \\times (2d+1)$ binary lattice, enabling us to distinguish the $X$ flips and $Z$ flips. This kind of encoding allows us to input the state $s_{t}$ to a deep convolutional neural network (CNN). The function of the CNN is to isolate a particular patch of the lattice which contains information about the syndrome volume and previous actions. \nThe output from the CNN layers is then fed to a feed-forward neural network. The final layer of this neural network encodes the $Q$-value, $Q(s_{t},a)$, where $a$ is the action. \nNext, we describe the parameterization of the $Q$-function using the deep CNN, which forms the deep $Q$-network. The deep $Q$-network is made up of convolutional layers, feed-forward neural layers, and a final layer that provides $Q$-values for the corrections of the different input states.\n\nFinally, we describe the training of the agent as it interacts with the environment to yield the optimal $Q$-value. \nIn this setting, the agent is trying to solve the decoding problem and converge on a strategy that maximizes the discounted cumulative reward. Every new episode is initiated by extracting a new syndrome volume (which is faulty due to the presence of measurement error with probability $p_{\\rm{meas}}$) and resetting the action history to zeros. The extracted syndrome and the reset action history are then fed to the agent as described before.\nThe agent then chooses an action according to the exploration/exploitation strategy. Next, the chosen action is applied to the surface code, and both the error configuration and the action history are updated. \n\nIn order to determine the agent's reward, the perfect syndrome, with respect to the updated error configuration, is fed to the referee decoder. 
The referee decoder, given a perfect syndrome, is used to suggest corrections to move the current state back to the codespace.\nThe referee decoder is a fast feed-forward NN trained using supervised learning based on \\cite{varsamopoulos2017decoding,varsamopoulos2019neural,chamberland2018deep}.\nIf the referee decoder is able to successfully decode the current syndrome, the agent remains alive; otherwise, the agent dies and the episode ends. If the action chosen by the agent restores the state to the original state, the agent gets a reward of $1$; otherwise, it gets a reward of $0$. \nAt the end of a complete learning episode, the cumulative number of actions the agent successfully takes to stabilize the qubit is extracted as the qubit lifetime.\nThe above process continues until the agent chooses the identity as an action, which implies that the agent is confident that it has applied all the necessary actions to return the code to the desired initial state. This leads to a new syndrome volume, and a new state is constructed from the reset action history and the updated syndrome. The new state is fed to the agent, and once again the episode continues as described previously, until the agent dies. \nIn the following section, we use the above environment and training steps to implement our approach to the decoding problem via probabilistic policy reuse. \n\\setlength{\\tabcolsep}{0.3em}\n\\begin{table}[H]\n\\centering\n\\begin{tabular}{|c|c|ccccc|}\n\\hline\n & & \\multicolumn{5}{c|}{Environments} \\\\ \\hline\n &\n &\n \\multicolumn{1}{c|}{\\emph{Environment-0}} &\n \\multicolumn{1}{c|}{\\emph{Environment-1}} &\n \\multicolumn{1}{c|}{\\emph{Environment-2}} &\n \\multicolumn{1}{c|}{\\emph{Environment-3}} &\n \\emph{Environment-4} \\\\ \\hline\n\\multirow{5}{*}{\\rotatebox[origin=c]{90}{error-probabilities}} &\n 0.003 &\n \\multicolumn{1}{c|}{Output} &\n \\multicolumn{1}{c|}{Input} &\n \\multicolumn{1}{c|}{Input} &\n \\multicolumn{1}{c|}{Input} &\n Input \\\\ \\cline{2-7} \n & 0.005 & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{Output} & \\multicolumn{1}{c|}{Input} & \\multicolumn{1}{c|}{Input} & Input \\\\ \\cline{2-7} \n & 0.007 & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{Output} & \\multicolumn{1}{c|}{Input} & Input \\\\ \\cline{2-7} \n & 0.011 & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{Output} & Input \\\\ \\cline{2-7} \n & 0.015 & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & \\multicolumn{1}{c|}{--} & Output \\\\ \\hline\n\\end{tabular}\n\\caption{This table shows the sequence of different environments corresponding to the different error-probabilities $p_{\\rm{err}}$. Based on the PPR method, previous policies are sampled from the policy library and used as inputs in the subsequent environments. \\emph{Environment-0}, which is the basis for all of the following experiments, is trained from scratch with $p_{\\rm{err}} = 0.003$ using the DDQN.} \n\\label{table:1}\n\\end{table}\n\\subsection{\\label{sec:ML model}Machine Learning Model}\n\n\\subsubsection{\\label{sec: ExpSetup}\\textbf{Experimental Setup}}\nIn this section, we implement deep reinforcement learning (DRL) and PPR to solve the decoding problem. We first train the agent from scratch for the given noise model and compare it with the cases where we use the PPR algorithm. 
Our goal is to demonstrate that the agent can solve the decoding problem, using previously solved models from different environments, faster than if it were to solve it from scratch. \n\\subsubsection{\\label{sec: Noise}\\textbf{Noise model}}\nWe consider a simplified noise model in which the $X$ and $Z$ errors are uncorrelated. Since the procedures for correcting independent $X$ and $Z$ errors are equivalent, it is sufficient to develop our algorithm to correct one type of error.\nWe test our agent's decoding ability for the bit-flip noise model ($X$-noise model) and observe the performance of our training algorithm under different noise environments.\n\\subsubsection{\\label{sec: DDQN}\\textbf{Deep Q-Network Setup}}\nWe use the Double Deep $Q$-Network (DDQN) (described in \\sectionautorefname{\\ref{subsec:DDQ}}) in all our simulations, for both the training-from-scratch method and the PPR algorithm. For a given learning episode, the agent observes its current state at each time step $t$ and chooses an action from a set $\\mathcal{A}$ consisting of single-qubit Pauli flips ($X$ or $Z$) and a special \\textit{request new syndrome} action, selecting the one it believes will drive it towards the correct solution. In the current environment setting, the agent is restricted to single-qubit operations.\n\nWe implement the DDQN using the PyTorch package. We keep the architecture of the neural network similar to the original paper \\cite{Sweke2020}, using three 2-dimensional CNN layers and two feed-forward linear layers; a minimal sketch of this architecture is given below. The kernel size and stride for the first CNN layer are $3$ and $2$, respectively, and for the last two layers they are $2$ and $1$, respectively.\nThe input to the DDQN is the state $s_{t}=\\{s_{\\rm{sv},t},~h_{t}\\}$, where $s_{\\rm{sv},t}$ is the faulty syndrome volume and $h_{t}$ is the action history list. \nNext, we describe the implementation of our PPR algorithm. We first train the agent from scratch for a very small error-probability ($p_{\\rm{meas}}=p_{\\rm{phys}}=0.003$), \\emph{Environment-0}. The probability of an error occurring on a single physical qubit is given by $p_{\\rm{phys}}$, and the probability of an error during syndrome measurement is given by $p_{\\rm{meas}}$. In our simulations, we set $p_{\\rm{phys}}=p_{\\rm{meas}}=p_{\\rm{err}}$. Therefore, in the rest of the work, we will use $p_{\\rm{err}}$ to describe the error-probabilities. \nWe then load this policy to solve \\emph{Environment-1}, where we increase the error-probability, making it a harder environment to solve and making it more complex for the agent to suppress the errors. Next, we combine the previous two models and load them to solve the next environment, which has a higher error-probability and is more difficult to solve. 
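\n\nThe following is the minimal PyTorch sketch of the convolutional $Q$-network referred to above (three 2-D convolutional layers with kernel/stride $3/2$, $2/1$, $2/1$, followed by two linear layers). The channel widths, the number of input channels, and the number of output actions are our own placeholder assumptions, not the exact values used in the experiments.
\\begin{verbatim}
import torch
import torch.nn as nn

class DecoderDQN(nn.Module):
    """Sketch of the decoder Q-network: conv layers with the kernel sizes
    and strides quoted in the text, then two feed-forward layers."""
    def __init__(self, lattice_size=11,     # (2d+1) with d = 5
                 in_channels=2, hidden=128, n_actions=32):  # placeholders
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=3, stride=2), nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=2, stride=1), nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=2, stride=1), nn.ReLU(),
        )
        with torch.no_grad():                # infer the flattened size
            n_flat = self.conv(torch.zeros(1, in_channels,
                                           lattice_size, lattice_size)).numel()
        self.head = nn.Sequential(
            nn.Linear(n_flat, hidden), nn.ReLU(),
            nn.Linear(hidden, n_actions),    # one Q-value per candidate action
        )

    def forward(self, x):
        return self.head(self.conv(x).flatten(start_dim=1))
\\end{verbatim}
In the double-$Q$ update, one copy of such a network serves as the policy network and a periodically synchronized copy serves as the target network, as described in \\sectionautorefname{\\ref{subsec:DDQ}}.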
We show a schematic of our framework in \\figureautorefname{\\ref{Fig:ppr}}, and a table showing the sequence of the different environments used in building the policy library in \\tableautorefname{\\ref{table:1}}\n\n\\begin{figure}[b]\n \\centering\n\n \n \\begin{subfigure}[b]{1\\columnwidth}\n \\includegraphics[angle=0,width=1\\columnwidth]{figures/from_scratch_0.003a.pdf}\n \n \\end{subfigure}\n \n \\caption{\\emph{Environment-0} training from scratch simulation using DDQN, for a very small error-probability of $p_{\\textrm{err}} = 0.003$}\n \\label{fig:Env0}\n\\end{figure}\n\\begin{figure}[b]\n\\centering\n\\begin{subfigure}[b]{1.\\textwidth}\n \\includegraphics[width=1\\textwidth]{figures/from_scratch_0.005a.pdf}\n \\caption{Training from scratch simulation for \\emph{Environment-1} using DDQN.}\n \\label{fig0.005scratch}\n\\end{subfigure}\n\\hfill\n\\begin{subfigure}[b]{1.\\textwidth}\n \\includegraphics[width=1\\textwidth]{figures/Fig2_reuse0.005.pdf}\n \\caption{Policy reuse simulation for \\emph{Environment-1}, using \\emph{Environment-0} policy from the policy library.}\n \\label{fig0.005_env1}\n\\end{subfigure}\n \\caption{(Color online) Training from scratch and policy reuse results for \\emph{Environment-1}: Single bit-flip error rate and a measurement error rate of 0.005. (a) Qubit lifetime as a function of episodes, when trained from scratch. We implement a DDQN neural network to train the agent. (b) Qubit lifetime as a function of episodes, when using previously trained policy from \\emph{Environment-0}, $p_{\\textrm{err}}=0.003$. We use the qubit lifetime of 500 as the baseline to compare the results between the training from scratch and policy reuse methods respectively. }\n\\label{fig0.005}\n\\end{figure}\n\n\\paragraph*{\\textbf{\\label{sec:PPRH}Probabilistic Policy Reuse Algorithm Hyper-parameters}} For the PPR algorithm, we assigned the following values to our hyper-parameters: For the temperature parameter $\\tau$ (which is updated during each episode), we set the initial value to\nbe $0$. It increases incrementally with the number of episodes that have passed by $\\delta\\tau$, which we set as $0.01$. We set the number of total episodes $K$ to be $10000$ and the maximum number of steps in a given episode, $H$, was set to be $1000$. For the replay memory $D$ we initialized a capacity of $10000$ transition pairs.\nThe learning rate for the ADAM optimizer \\cite{kingma2014adam} was set to $0.001$. Additionally, we have the following hyper-parameters for the $\\pi$-exploration\nalgorithm: We assign the initial value (of the probability of following a previous policy)\nto be $1$ and the value for $\\nu$ (the decay factor of $\\psi$ ) to be $0.95$. Also, we calculate the loss using the SMOOTH L1 LOSS function in PyTorch \\cite{ren2015faster}.\n\n\\section{\\label{sec:ExpAndResults}Experiments and Results}\n\\subsection{\\emph{Environment-0}: error-probability=0.003 \nWe first run a simulation to solve the relaxed-decoding problem in an environment with a very low probability error. We do this to start building a policy library for the PPR algorithm. \nWe train the agent from scratch to solve the environment using a DDQN. From \\figureautorefname{\\ref{fig:Env0}} we see that the agent starts converging around $6000^{th}$ episode. 
The policy generated after the completion of $10000$ episodes will be added to the policy library to be used in harder environments.\n\n\\begin{figure}[t\n\\centering\n\\begin{subfigure}[b]{1\\linewidth\n \\includegraphics[trim=0 -0 0 0,angle=0,width=1\\linewidth]{figures/from_scratch_0.007.pdf}\n \\subcaption{Training from scratch simulation for \\emph{Environment-2.}}\n \\label{fig0.007scratch}\n\\end{subfigure}\n\\begin{subfigure}[b]{1\\linewidth}\n \\includegraphics[trim=0 -0 0 0,angle=0,width=1\\linewidth]{figures/fig3_reuse0.007.pdf}\n \\subcaption{Policy reuse simulation for \\emph{Environment-2}. Policy library: Policy trained from scratch for \\emph{Environment-0} and policy trained for \\emph{Environment-1} described in \\ref{sec:0.005} .}\n \\label{fig0.007reuse}\n\\end{subfigure}\n \\caption{(Color online) Training from scratch and policy reuse results for \\emph{Environment-2}: Single bit-flip error rate and a measurement error rate of 0.007. }\n\\label{fig0.007}\n\\end{figure}\n\n\\subsection{\\label{sec:0.005}\\emph{Environment-1}: error-probability=0.005}\n\\begin{figure\n\\centering\n\\captionsetup[subfigure]{justification=centering}\n\\begin{subfigure}[b]{0.45\\linewidth}\n\\centering\n\\includegraphics[width=\\linewidth]{figures/from_scratch_0.011a.pdf}\n \n \\subcaption{Training from scratch simulation for \\emph{Environment-3}}\n \\label{fig0.011scratch}\n \\vspace{4ex}\n\\end{subfigure}\n\\quad\n\\begin{subfigure}[b]{0.45\\linewidth}\n\\centering\n \\includegraphics[width=\\linewidth]{figures/fig4_PR1_0.005_perr_0.011.pdf}\n \\subcaption{Policy reuse simulation for \\emph{Environment-3}, using \\emph{Environment-0} policy from the policy library.\n }\n \\label{fig0.011_env0}\n \n \\end{subfigure}\n\\quad\n\\begin{subfigure}[b]{0.45\\linewidth}\n\\centering\n \\includegraphics[width=\\linewidth]{figures/fig4_PR2_0.005_7_perr0.011.pdf}\n \\subcaption{Policy reuse simulation for \\emph{Environment-3}, using \\emph{Environment-0}, \\emph{Environment-1} policies from the policy library. }\n \\label{fig0.011_env0_env1}\n \\vspace{4ex}\n\\end{subfigure}\n\\quad\n\\begin{subfigure}[b]{0.45\\linewidth}\n\\centering\n \\includegraphics[width=\\linewidth]{figures/fig4_0.011_three_PR.pdf}\n \\subcaption{Policy reuse simulation for \\emph{Environment-3}, using \\emph{Environment-0}, \\emph{Environment-1}, \\emph{Environment-2} policies from the policy library. }\n \\label{fig0.011_env0_env1_env2}\n \\vspace{4ex}\n\\end{subfigure}\n \\caption{(Color online) Training from scratch and policy reuse results for \\emph{Environment-3}: Single bit-flip error rate and a measurement error rate of 0.011. We observe that as the number of policies increases the agent is able to attain higher qubit lifetime, as well as converges faster.}\n\\label{fig0.011}\n\\end{figure}\nNext, we apply the policies from \\emph{Environment-0} to the next setting, \\emph{Environment-1}, where the error-probability $p_{\\rm{err}}$ is $0.005$. We compare the two simulations where we train the agent from scratch to solve \\emph{Environment-1}, as well as using the PPR algorithm. In \\figureautorefname{\\ref{fig0.005scratch}} and \\ref{fig0.005_env1}, we use the qubit lifetime of $500$ as the baseline as \\cite{Sweke2020} is able to achieve an average qubit lifetime of $500$ using their model. 
\nIn the training from scratch, the agent stabilizes around the $5000^{th}$ episode (\\figureautorefname{\\ref{fig0.005scratch}}), whereas the PPR algorithm as shown in \\figureautorefname{\\ref{fig0.005_env1}} achieves a higher qubit lifetime and stabilizes around the $3000^{th}$ episode. Additionally, when we compare the raw qubit lifetime in \\figureautorefname{\\ref{fig0.005scratch}} and \\ref{fig0.005_env1}, we see that the PPR algorithm achieves a score greater than 2000, which is not achieved when we do training from scratch. \n\\begin{figure}[t\n\\centering\n \\begin{subfigure}[t]{1\\textwidth}\n \\centering\n \\includegraphics[width=1\\textwidth]{figures/from_scratch_0.015.pdf}\n \n \\subcaption{Training from scratch simulation for \\emph{Environment-4}}\n \\label{fig0.015scratch}\n \\end{subfigure}\n \\quad\n \\begin{subfigure}[t]{1\\textwidth}\n \\centering\n \\includegraphics[width=1\\textwidth]{figures/fig5_PR1_0.011_perr_0.015.pdf}\n \n \\subcaption{Policy reuse simulation for \\emph{Environment-4} using \\emph{Environment-3} policy from the policy library.}\n \\end{subfigure}\n\\caption{(Color online) Training from scratch and policy reuse results for \\emph{Environment-4}: Single bit-flip error rate and a measurement error rate of 0.015. }\\label{fig5}\n\\end{figure}\n\n\\subsection{\\label{sec:0.007}\\emph{Environment-2}: error-probability=0.007}\nIn this environment, we increase the error-probability to $0.007$, which is relatively more challenging compared to the previous environments. We observe from \\figureautorefname{\\ref{fig0.007scratch}}, that the agent struggles to suppress the errors, and unlike the previous environment, the agent does not stabilize until $5000^{th}$ episode. However, when we implement the previous policy libraries (\\emph{Environment-0}, \\emph{Environment-1}), the agent starts learning around $2000^{th}$ episode and stabilizes well, as observed in \\figureautorefname{\\ref{fig0.007reuse}}.\n\n\\subsection{\\label{sec:0.011}\\emph{Environment-3}: error-probability=0.011}\nThe error-probability for this environment increases to $0.011$, making it more difficult for the agent to suppress errors. We see from \\figureautorefname{\\ref{fig0.011scratch}} that the training from scratch takes a longer time to converge/learn (14000 total episodes). From the results of the PPR algorithm, we see that the learning is relatively more stable even when a single policy is used, as shown in \\figureautorefname{\\ref{fig0.011_env0}}. In \\figureautorefname{\\ref{fig0.011_env0_env1}}, \\ref{fig0.011_env0_env1_env2} there is a drastic improvement in the average-qubit lifetime as well as the convergence of the agent when we use two, and three policies. We note that the average-qubit lifetime is not very high, but it is understandable as the environment is difficult to solve due to the increase in the probability of error. \n\n\\subsection{\\label{sec:0.015}\\emph{Environment-4}: error-probability=0.015}\nThe error-probability for this environment increases to $0.015$, it is the hardest environment we consider in this work. We see from \\figureautorefname{\\ref{fig5}} that single policy reuse of \\emph{Environment-3} does not provide any improvement over training from scratch. In \\figureautorefname{\\ref{fig6}} we compare the results for using a different number of policies from the policy library. We observe that there is an immense improvement in the average qubit lifetime from two policies to three policies. 
However, the performance of the agent does not change much from three policies to four policies. \n\n\n\\begin{figure}[t\n\\captionsetup[subfigure]{justification=centering}\n\\centering\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/DoubleDQNN_QEC_ReuseX0.015_Iter_Count_9999_err_0.015_avg_lifetime.pdf} \n \\subcaption{One Policy: [0.011]} \n \\label{fig6a} \n \\vspace{2ex}\n \\end{subfigure\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/DoubleDQNN_QEC_Reuse2X0.015_Iter_Count_9999_err_0.015_avg_lifetime.pdf}\n \\subcaption{Two Policies: [0.007, 0.011]}\n \\label{fig6b} \n \\vspace{2ex}\n \\end{subfigure} \n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/DoubleDQNN_QEC_Reuse3X0.015_Iter_Count_9999_err_0.015_avg_lifetime.pdf} \n \\subcaption{Three Policies: [0.003, 0.007, 0.011]} \n \\label{fig6c} \n \\vspace{2ex}\n \\end{subfigure\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/DoubleDQNN_QEC_Reuse4X0.015_Iter_Count_9999_err_0.015_avg_lifetime.pdf}\n \\subcaption{Four Policies: [0.003, 0.005, 0.007, 0.011]} \\label{fig6d}\n \\vspace{2ex}\n \\end{subfigure\n \\caption{(Color online) Policy reuse simulation for $p_{\\textrm{err}}=0.015$ with different number of policies. We observe that there is a huge improvement in the \\emph{average} qubit lifetime for three policies against two policies. There is not much notable difference when we increase the number of policies from three to four. However, we will show in \\figureautorefname{\\ref{fig:hist}} that the raw qubit lifetime increases for a higher number of policies.}\n \\label{fig6} \n\\end{figure} \n\\subsection{Different Number of Policies}\n\\begin{figure}[h\n\\captionsetup[subfigure]{justification=centering}\n\\centering\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/histograms/Error 0.015 Inset/DoubleDQNN_QEC_ReuseX0.015_Iter_Count_9999_err_0.015_hist_inset.pdf} \n \\subcaption{\\footnotesize Policy reuse simulation for \\emph{Environment-4} using \\textbf{one policy}: the \\emph{Environment-3} policy. Within the inset, the qubit lifetimes falling between \\emph{80} and \\emph{120} are few and heavily skewed to the right.} \n \\label{fig:hista} \n \\vspace{2ex}\n \\end{subfigure\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/histograms/Error 0.015 Inset/DoubleDQNN_QEC_Reuse2X0.015_Iter_Count_9999_err_0.015_hist_inset.pdf} \n \\subcaption{\\footnotesize Policy reuse simulation for \\emph{Environment-4} using \\textbf{two policies}: the \\emph{Environment-2} \\& \\emph{Environment-3} policies. Within the inset, there is more occupation in the higher lifetimes compared to (a).} \n \\label{fig:histb} \n \\vspace{2ex}\n \\end{subfigure} \n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/histograms/Error 0.015 Inset/DoubleDQNN_QEC_Reuse3X0.015_Iter_Count_9999_err_0.015_hist_inset.pdf} \n \\subcaption{\\footnotesize Policy reuse simulation for \\emph{Environment-4} using \\textbf{three policies}: the \\emph{Environment-0}, \\emph{Environment-2}, \\& \\emph{Environment-3} policies. 
Within the inset, there is a large overall increase in occupation for lifetimes between \\emph{80} and \\emph{120} compared to (a) and (b).}\n \\label{fig:histc} \n \\vspace{2ex}\n \\end{subfigure\n \\begin{subfigure}[]{0.5\\linewidth}\n \\centering\n \\includegraphics[width=0.95\\linewidth]{figures/histograms/Error 0.015 Inset/DoubleDQNN_QEC_Reuse4X0.015_Iter_Count_9999_err_0.015_hist_inset.pdf} \n \\subcaption{\\footnotesize Policy reuse simulation for \\emph{Environment-4} using \\textbf{four policies}: the \\emph{Environment-0}, \\emph{Environment-1}, \\emph{Environment-2}, \\& \\emph{Environment-3} policies. Within the inset, there is more occupation in the higher lifetimes compared to (c).} \\label{fig:histd}\n \\vspace{2ex}\n \\end{subfigure\n \\caption{(Color online) Histograms for the policy reuse simulations of \\emph{Environment-4} using different policies from the policy library. Each histogram plots the frequency of qubit lifetimes over the 10,000 episodes of each simulation. We observe that the simulations with more policies noticeably increased the qubit lifetimes of each episode as shown with higher occupation in the larger lifetime bins.}\n \\label{fig:hist} \n\\end{figure}\nIn \\figureautorefname{\\ref{fig:hist}} we illustrate the results of the algorithm in \\emph{Environment-4} ($p_{err} = 0.015$) in the form of histograms. Each histogram represents the culmination of $10000$ episodes over a single simulation and plots the frequency of qubit lifetimes gathered at the termination of each episode. The histogram insets highlight the frequencies of lifetimes ranging between 80 and 120. As we can see from \\figureautorefname{\\ref{fig:hist}}, there is a dramatic increase from using 1-2 policies (\\figureautorefname{\\ref{fig:hista}}, \\ref{fig:histb}) to 3-4 policies (\\figureautorefname{\\ref{fig:histc}}, \\ref{fig:histd}). This increase demonstrates the algorithm's capabilities in lengthening the qubit lifetime for a larger number of episodes as more policies are loaded into the policy library.\n\nIn \\tableautorefname{\\ref{table:2}} we compare the average qubit lifetime of the logical qubit decoded by the agent trained from scratch and using policy reuse against the average qubit lifetime of a single faulty qubit. For the bit-flip noise explored in this work, we find that for $p_{\\rm{err}}<0.011$ the decoding agent of the surface code $d=5$ logical qubit is able to attain an average qubit lifetime that is greater than that of the single faulty qubit. We attribute this to various factors. First, our PPR algorithm was not executed over multiple values of hyper-parameters. Therefore, the hyper-parameters used are not necessarily the optimal values. Second, we hope to see better performance when using more complex NN architectures \\cite{Julien2022,esslinger2022deep,Wang2018}. Finally, as pointed out before, the increasing number of policies can increase the qubit lifetime. Therefore, building a more exhaustive policy library for different values of $p_{\\rm{err}}$ will most likely improve the qubit lifetimes. 
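\n\nAs a reading aid for the table below (our own inference, not a claim made by the authors): the single-faulty-qubit lifetimes listed there are consistent with the expected number of time steps before the qubit suffers an error, i.e.
\\begin{equation*}
T_{\\rm single} \\approx \\frac{1}{p_{\\rm err}}:\\qquad
\\frac{1}{0.003}\\approx 333,\\quad \\frac{1}{0.005}=200,\\quad \\frac{1}{0.007}\\approx 143,\\quad \\frac{1}{0.011}\\approx 91,\\quad \\frac{1}{0.015}\\approx 67.
\\end{equation*}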
\n\n\\setlength{\\tabcolsep}{0.5em}\n\\begin{table}[!ht]\n\\centering\n\\begin{tabular}{|c|c|c|c|}\n\\hline\np\\_{\\rm{err}} &\n \\begin{tabular}[c]{@{}c@{}}Single Faulty \\\\ Qubit Lifetime\\end{tabular} &\n \\begin{tabular}[c]{@{}c@{}}DDQN (training from \\\\ scratch) Surface Code\\end{tabular} &\n \\begin{tabular}[c]{@{}c@{}}Policy Reuse DDQN \\\\ Surface Code\\end{tabular} \\\\ \\hline\n0.003 & 333 & 1000 & --- \\\\ \\hline\n0.005 & 200 & 250 & 400 (1 policy) \\\\ \\hline\n0.007 & 143 & 120 & 180 (2 policies) \\\\ \\hline\n0.011 & 91 & 45 & 50 (3 policies) \\\\ \\hline\n0.015 & 67 & 25 & 40 (4 policies) \\\\ \\hline\n\\end{tabular}\n\n\\caption{Table comparing the lifetime for a single faulty qubit against the average qubit lifetime obtained from training the agent from scratch (using DDQN) and policy reuse. The qubit lifetime for training from scratch surpasses that of the faulty qubit up to $p_{\\rm{err}}=0.005$. The policy reuse is able to attain a higher average qubit lifetime compared to the faulty qubit up to $p_{\\rm{err}}=0.007$. For higher $p_{\\rm{err}}$ the policy reuse is not able to surpass the faulty qubit lifetime. However, the policy reuse results can be improved if we can tune the hyper-parameters, and/or increase the number of episodes for training the agent.}\n\\label{table:2}\n\\end{table}\n\\section{\\label{sec:Discussion}Discussion}\n\n\n\\subsection{Relevant Works}\n\\label{sec:RelevantWorks}\nMachine learning (ML) techniques have been applied to tackle certain quantum computing challenges recently. Notable examples are quantum architecture search (QAS) \\cite{fosel2021quantum,kuo2021quantum,ye2021quantum,ostaszewski2021reinforcement,chen2021quantum,yao2022monte,ding2022evolutionary,kimura2022quantum,duong2022quantum,he2022search,wang2022automated,chen2022generating,sogabe2022model}, quantum control \\cite{sivak2022model,niu2019universal,bukov2018reinforcement,brown2021reinforcement,brown2022optimal}, quantum compiling \\cite{moro2021quantum,zhang2020topological,he2021variational,pozzi2020using,chen2022efficient,chen2021quantum}, quantum error mitigation \\cite{kim2020quantum} and quantum error correction \\cite{nautrup2019optimizing,convy2022machine,andreasson2019quantum}.\nThe research fields mentioned above are all sequential decision-making tasks, common in quantum computing research at different levels of the quantum stack. All of those areas can benefit from the use of ML techniques, especially RL.\n\nQAS is in the high-level part of the stack. The purpose of QAS is to find a quantum circuit architecture suitable for a particular task. The task might be generating a desired quantum state \\cite{kuo2021quantum,ye2021quantum,kimura2022quantum}, finding an efficient circuit for solving chemical ground states \\cite{ostaszewski2021reinforcement}, solving an optimization task \\cite{yao2022monte,duong2022quantum}, optimizing a given quantum circuit for a particular hardware architecture \\cite{fosel2021quantum} or performing a machine learning task \\cite{ding2022evolutionary,duong2022quantum}. While our work's purpose is not to generate a quantum circuit, it does share some features with those QAS works. For example, the works \\cite{kuo2021quantum,ye2021quantum} require the RL agent to decide suitable quantum operations to be added. It is similar to choosing a proper action to `correct' the errors after knowing the syndrome information. 
However, this environment can also be implemented \nHaving the quantum circuit architecture, the next step is to compile the circuit into hardware-specific gate sequences. The compiled instructions may need to be further optimized or adjusted to be fault-tolerant or error-corrected. Our work is at this level. \nAt the most fundamental level, the quantum control procedure is used to translate the gate sequences into control signals applied to quantum physical systems \\cite{Krotov,GRAPE1,CRAB,GRAPE2,Riaz2019,GOAT}. These control signals (or pulses) are responsible for taking the qubit from one state to another. Optimal-control-based methods are widely used to improve the fidelity of quantum gate operations, by optimizing the control pulses\\cite{brown2022optimal,Koch_2016}. In recent years, several open-source software packages have been developed to improve gate fidelity using optimal pulse control \\cite{c3-optimize,qopt2021}. In \\cite{matekole2022methods}, quantum optimal control of pulses has been executed on the experimental hardware and has shown an improvement in the performance of quantum logic gates over the default logic gates.\nQuantum optimal control is an ideal problem for RL-based methods because it steers the quantum system to a particular target state by synthesizing the control fields for a given set of constraints, noise, and time. Deep-RL techniques have been implemented on the experimental quantum hardware of IBM and have shown some robustness to the errors\\cite{sivak2022model,niu2019universal,bukov2018reinforcement,brown2021reinforcement,He2021,qctrl2021}.\n\nThe proposed continual RL framework is applied to surface code decoding, however, it is not limited to QEC decoding only.\nFor example, in quantum compiling, we need to translate the high-level quantum algorithm or circuit architecture into a gate sequence suitable for particular quantum hardware and take into account the available quantum operation/gate sets or hardware topology\nIf we can leverage the knowledge of compiling policies trained on other quantum computers with similar but different topologies or configurations, it will reduce the overall development timeline. Another field where our methods can be applied to is quantum control. Since the device noises usually drift and we may not fully capture the dynamics accurately, it is very difficult to derive an exact analytical model for the system Hamiltonian which is required to derive the control signals.\nIf we can treat the device at each time step as a black box and train RL agents with the PPR algorithm as described in this paper, it may help in building the \\emph{autonomous control signal calibrator}.\n\\subsection{Running on a real Quantum Computer}\nThe ultimate goal of investigating these AI/ML methods for quantum error correction or other relevant topics mentioned in \\sectionautorefname{ \\ref{sec:RelevantWorks}} is to actually run on quantum computers. The major challenges, which motivated this research, are the changing device noise patterns in many aspects. In addition, it is impractical to deploy a large number of training episodes on real quantum computers to capture the error pattern. However, researchers and engineers collect valuable information about the machine of interest and people can design approximate noise models to simulate such systems to some extent. Such information will also benefit the actual deployment of our proposed methods. 
The idea is that we can train RL agents against these approximate noise models and gain some knowledge of the system. If we can observe the system for a bit longer time, we may also collect information on how these noise drift and construct a series of training environments as the \\emph{curriculum} to train the agent. After training with the curriculum, the agent has a better starting point to be trained with a real quantum computer with different noise patterns. For example, the agent may collect a library of policies to use when encountering real-world noise and this gained knowledge would significantly reduce the required training episodes.\n\n\\subsection{Hardware Accelerators and Model Distillation}\nThe practical application of the proposed method is to be working with real quantum hardware. One of the crucial issues is the latency of the inference process (e.g. generating suitable correcting actions in real time). This challenge can be potentially solved via specialized acceleration hardware such as a field-programmable gate array (FPGA) which is an integrated circuit designed to be configured or reprogrammed after manufacturing. \nIndeed, recent works have shown that FPGA can be used to accelerate the ML inference processes in certain fields such as computer vision \\cite{westby2021fpga} and particle physics data analysis \\cite{duarte2019fpga}. \nIt is expected that such techniques can be applied in RL as well \\cite{cho2019fa3c}.\nConsider a step further, we can even compress the trained models for more efficient inference. For example, existing methods such as model distillation \\cite{polino2018model,gou2021knowledge} can be used to largely reduce the model size while keeping the performance to the desired level. \nIn addition, trained models in the policy library can be distilled into several crucial policies. For example, environments may be different but still share many common features. The policies trained for these similar environments should be similar. It is desirable to extract the most important information from these policies to form \\emph{eigenpolicies} \\cite{fernandez2006probabilistic}. Various methods have been proposed to distillate the knowledge learned by the RL agent to build a more efficient learning scheme \\cite{rusu2015policy,traore2019discorl}. We expect those advanced continual RL techniques can be applied in the context of quantum computing.\n\\subsection{Beyond Decoding}\nRL and other ML techniques have been applied to QEC decoding problems \\cite{Torlai2017,krastanov2017deep,varsamopoulos2017decoding}. \nHowever, the performance of a QEC scheme is largely determined by the encoding method. The proposed method can be used to study encoding problems as well, e.g. finding optimal code structures under various noise patterns. For example, it is possible to utilize the framework in this paper to study how to apply a continual RL agent to find optimal quantum error correction codes when the device error changes over time. For example, in \\cite{nautrup2019optimizing} the RL agent was exposed to a more complicated surface code geometry, and the goal was to find the optimal connectivity of the surface code instead of just having nearest-neighbor connectivity on a simple square lattice. The agent was able to find unique qubit connectivities that yielded the best surface codes. 
\n\nAnother interesting application of RL that goes beyond decoding is \\emph{autonomous quantum error correction}, where the agent's task is to find the optimal encoding which is robust to the noisy dynamics of the quantum system. The authors in \\cite{wang2022automatedQEC} introduced an algorithm called AutoQEC that achieves this task while maximizing the fidelity of the logical qubit and were also able to discover a new quantum error correction code. Moreover, in \\cite{Florian2018} an ab initio method was proposed where machine learning was employed for a full QEC protocol discovery. Even though this method works for a small system of qubits, it provides an important outlook on the flexibility of RL that is capable of discovering QEC along with error mitigation, from scratch.\n\n\\section{\\label{sec:Conclusion}Conclusion}\nIn this work, we demonstrate the capabilities of the DDQN-PPR model in decoding the surface code when bit-flips have occurred. Through simulations in quantum environments with varying noise levels, we numerically show there is an increase in the agent's performance when it utilizes PPR to apply the knowledge from previously learned policies to a new noise environment. The agent's improvement is reflected by the increase in the average qubit lifetime seen with the PPR simulations when compared to the simulations done from scratch. While this algorithm has shown its capabilities in addressing QEC, the framework detailed here is general enough to be applied to other aspects of quantum computing such as QAS and QOC, where noise also plays a prominent role.\n\n\\clearpage\n\\begin{acknowledgments}\nThis work is supported by the U.S.\\ Department of Energy, Office of Science, Office of High Energy Physics program under Award Number DE-SC-0012704, Office of Workforce Development for Teachers and Scientists (WDTS) under the Science Undergraduate Laboratory Internships Program (SULI) \\& BNL High School Research Program (HSRP) and the Brookhaven National Laboratory LDRD \\#20-024. This research used resources of the Oak Ridge Leadership Computing Facility, which is a DOE Office of Science User Facility supported under Contract DE-AC05-00OR22725. This research used resources of the National Energy Research Scientific Computing Center (NERSC), a U.S. Department of Energy Office of Science User Facility located at Lawrence Berkeley National Laboratory, operated under Contract No. DE-AC02-05CH11231 using NERSC award HEP-ERCAPm4138.\n\n\\end{acknowledgments}\n\\par\\vfill\\break\n\n"} +{"id": "red-arxiv-13", "source_id": "red-arxiv_13_red-arxiv-13", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction}\n\nThe largest volume ellipsoid contained in a convex body in $\\Re^d$ and, in particular, John's result \\cite{john} \ncharacterizing it, plays a fundamental role in convexity. The latter states that the \norigin-centered Euclidean unit ball is the largest volume ellipsoid contained \nin the convex body $K$ if and only if it is contained in $K$ and the contact \npoints (that is, the intersection points of the unit sphere and the boundary of \n$K$) satisfy a certain algebraic condition.\n\nAs a natural generalization, one may fix two convex bodies $K$ and $L$ and \nsolve the optimization problem of finding a largest volume affine image of $K$ \ncontained in $L$. 
In this setting, one expects a John-type condition in terms of \ncontact pairs defined as follows.\nIf $K\\subseteq L\\subset\\Re^d$ are convex bodies, then a pair \n$(u,v)\\in\\Re^d\\times\\Re^d$ is called a \\emph{contact pair}, if\n$u$ belongs to the intersection of the boundaries of $K$ and $L,$\n$v$ belongs to the intersection of the boundaries of the polar sets $\\loglego{K}$ and $\\loglego{L},$ and $\\iprod{u}{v}=1$. In other words, $v$ is \nan outer normal vector of a common support hyperplane of $K$ and $L$ at a common \nboundary point $u$, with a proper normalization.\n\nV. Milman achieved the first results giving a condition of optimality in the \nabove problem (unpublished, see Theorem~14.5 in \\cite{TJ89}) followed by Lewis \n\\cite{L79}, which were strengthened and extended by Giannopoulos, Perissinaki, \nand Tsolomitis \\cite{GPT01} and then by Bastero and Romance \\cite{BR02}. \nFinally,\nGordon, Litvak, Meyer, and Pajor \\cite[Theorem~3.1]{GLMP04} proved the following.\n\n\\begin{thm}[Gordon, Litvak, Meyer, Pajor \\cite{GLMP04}]\\label{thm:GLMP}\n\tLet $K$ be a compact set containing the origin in the interior of its convex hull and $L$ be a convex body in $\\Re^d$ with $K\\subseteq L$ such that no \naffine image of $\\conv{K}$ contained in $L$ is of larger volume than $\\conv{K}$. Then there are contact pairs \n$\\{(u_i,v_i)\\colon i=1,\\ldots m\\}$ of $K$ and $L$ with $m\\leq d^2+d$ such that we have\n\t\\begin{equation*\n\t\\sum_{i=1}^{m} c_i u_i\\otimes v_i=\\mathrm{Id}_d, \\text{ and }\\;\n\n\t\\sum_{i=1}^{m} c_i v_i=0,\n\t\\end{equation*}\n where $\\mathrm{Id}_{d}$ denotes the identity operator on $\\Re^d$, and $u\\otimes v$ denotes \nthe linear operator $x\\mapsto \\iprod{v}{x}u$ for every $x\\in\\Re^d$.\n\\end{thm}\n\nAs a corollary, it is shown in \\cite{GLMP04} that under the assumptions of \n\\Href{Theorem}{thm:GLMP}, we have $L-z\\subseteq -d(K-z)$ for an appropriately \nchosen point $z\\in K$. The case when $-d$ cannot be \nreplaced by a magnification factor of smaller absolute value was studied in \n\\cite{Pal92, JN11}, see Gr\\\"unbaum's survey \\cite{Gr63} for more on this \nquestion. \n\n\n\n\nOur present goal is to extend \\Href{Theorem}{thm:GLMP} to the setting of \nlog-concave functions. As a natural generalization of the notion of \naffine images of convex bodies, we define the \\emph{positions} of a function $g$ \non $\\Re^d$ as\n\\[\\funpos{g}=\\{\\upalpha g(Ax+a)\\colon A\\in\\Re^{d\\times d} \\text{ non-singular}, \\upalpha>0, a\\in\\Re^d\\}.\\]\nWe will say that a function $f_1$ on $\\Re^d$ is \\emph{below} another function $f_2$ on $\\Re^d$ (or that $f_2$ is \\emph{above} $f_1$)\nand denote it as $f_1 \\leq f_2,$ if $f_1$ is pointwise less than or equal to $f_2,$\nthat is, $f_1(x) \\leq f_2(x)$ for all $x \\in \\Re^d.$ \n\n\nFixing $s > 0$ and two functions $f, g \\colon \\Re^d \\to [0, \\infty)$, we \nformulate the following optimization problem. \n\n\\medskip \\noindent\n\\textbf{The John $s$-problem:} Find\n\\begin{equation}\\label{eq:john_problem_intro}\n\\max\\limits_{h \\in \\funpos{g} \n\t\\int_{\\Re^d} h^s \n\t\\quad \\text{subject to} \\quad\n\th \\leq f.\n\\end{equation}\n\nJohn's theorem on largest volume ellipsoids was extended by \nAlonso-Guti{\\'e}rrez, Gonzales Merino, Jim{\\'e}nez and Villa \n\\cite{alonso2018john} to the setting of \\emph{logarithmically concave} \n(or in short, \\emph{log-concave}) functions, that is, those $\\Re^d\\to[0,\\infty)$ \nfunctions whose logarithm is a concave function. 
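Typical examples of log-concave functions are the Gaussian density $x\\mapsto e^{-\\enorm{x}^2/2}$ and the indicator function of a convex body (equal to $1$ on the body and $0$ outside); with indicator functions as $f$ and $g$, the John $s$-problem \\eqref{eq:john_problem_intro} essentially reduces to the volume-maximization problem for convex sets considered in \\Href{Theorem}{thm:GLMP}.\n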
They consider the John $1$-problem with $g$ being the indicator function of the Euclidean unit ball $\\ball{d}.$\nA more general treatment was given in \\cite{ivanov2022functional}, where the authors \nconsider the John $s$-problem for some $ s > 0$ with $g$ being the ``height'' function of the upper hemisphere of the Euclidean ball $\\ball{d+1},$ that is, $g(x) = \\sqrt{1 - \\enorm{x}^2}$ for $\\enorm{x} \\leq 1$ and $g(x)= 0$ otherwise. The authors obtained\n the first necessary and sufficient condition on maximizers in this problem analogous to the original John condition. \n \n What conditions should $f$ and $g$ satisfy in order for the John $s$-problem to be meaningful? Following the path of analogy with \\Href{Theorem}{thm:GLMP} seems easy at first. Instead of closed sets, we will work with upper semi-continuous functions, instead of volume, we will work with the integral, or the integral of the $s$ power of the function. In \\Href{Theorem}{thm:GLMP}, $K$ and $L$ are compact, should we assume that the \\emph{support} of $f$ and $g$ defined as \n\\[\n\\supp f = \\{x \\in \\Re^d \\colon f(x) > 0 \\}\n\\]\nis bounded? That would be too restrictive, as it would disqualify the Gaussian density as $f$. On the other hand, clearly, the class of those functions $g$, for which the family $\\{h\\in\\funpos{g}\\colon h\\leq f\\}$ is not empty \\emph{for any} log-concave function $f$ with positive integral, is the class of functions with bounded support. Thus, $g$ being of bounded support is a natural assumption.\n\nWe will call an upper semi-continuous function of finite and positive integral a \\emph{proper} function.\n \n\\begin{basicassumptions*}\n\\label{assumptions:basic}\n We say that a function $g \\colon \\Re^d \\to [0, + \\infty)$ satisfies our\n \\emph{Basic Assumptions}, if it has the following properties:\n\\begin{itemize}\n\\item $g$ is a proper log-concave function, and\n\\item $\\supp g$ is bounded, and\n\\item the origin is in the interior of $\\supp g$.\n\\end{itemize} \n\\end{basicassumptions*}\n\\begin{cosmeticassumptions*}\n\\label{assumptions:intro} \n We say that a function $g \\colon \\Re^d \\to \\Re$ satisfies our\n \\emph{Auxiliary Assumptions} if it has the following properties:\n\\begin{itemize}\n\\item $g$ satisfies our Basic Assumptions, and\n\\item $g$ attains its maximum at the origin, and\n\\item $\\ln g$ is differentiable on $\\supp g$.\n\\end{itemize} \n\\end{cosmeticassumptions*}\n\n\\begin{dfn}\\label{defn:contacts_point}\nFor two functions $f,g\\colon\\Re^d\\to\\Re$, we call the set\n\t\t\\[\n\t\t\\contactpoint{f}{g} = \n\t\t\\big\\{ u \\in \\cl{\\supp f} \\cap \\cl{\\supp g} \\colon f(u) = g(u)\\big\\}\n\t\t \\]\ntheir \\emph{set of contact points}.\n\\end{dfn}\n\n\nWe are ready to state our first main result. \n\n\\begin{thm}[John's condition -- no zeros]\\label{thm:john_intro}\nFix $s>0$. \nLet $f \\colon\\Re^d\\to (0,+\\infty)$ be a proper log-concave function taking only positive values. 
Let $g = e^{-\\psi} \\colon \\Re^d \\to [0, +\\infty)$ be a function satisfying our Auxiliary Assumptions (see page~\\pageref{assumptions:intro}) such that $g \\leq f$.\nAssume that $h=g$ is a maximizer in John $s$-problem \n\\eqref{eq:john_problem_intro}.\nThen there are contact\npoints $u_1, \\dots, u_m \\in \\contactpoint{f}{g}$ \nand positive weights $c_1,\\ldots,c_m$ such that\n\\begin{equation}\\label{eq:functional_glmp_intro}\n\t\\sum_{i=1}^{m} c_i \\frac{{u}_i \\otimes \\nabla \\psi(u_i)}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}} = \n\t\\mathrm{Id}_{d}, \\quad \n\t\\sum_{i=1}^{m} \\frac{c_i}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}} = s\n\t\t\\quad\\text{and}\\quad\n\t\t\\sum_{i=1}^{m} c_i \\frac{\\nabla \\psi(u_i)}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}}=0.\n\\end{equation}\nMoreover, if $g$ is \\emph{radially symmetric}, then condition \\eqref{eq:functional_glmp_intro} is also sufficient. That is, if $g(x)=g_0(|x|)$ for some function $g_0:[0,\\infty)\\rightarrow[0,\\infty)$, and there are contact\npoints $u_1, \\dots, u_m \\in \\contactpoint{f}{g}$ \nand positive weights $c_1,\\ldots,c_m$ satisfying \\eqref{eq:functional_glmp_intro}, then \n$g$ is the unique maximizer in John $s$-problem \n\\eqref{eq:john_problem_intro}.\n\\end{thm}\n\n\\Href{Theorem}{thm:john_intro} will be a corollary to our much more general \n\\Href{Theorem}{thm:john_condition_general}. \n\nLet us elaborate on the conditions on the functions. \nFirst, the differentiability of $\\psi$ is assumed for simplicity, in the general setting it will not be necessary as we will consider subgradients of $\\psi$, and the \nFr\\'echet normal cones (see \\Href{Definition}{def:nconus}) of the \\emph{lifting} of $f$ and $g$, defined as \n\\[\n\\lifting{f} = \\left\\{ (x, y) \\in \\Re^{d+1} \\colon x \\in \\cl{\\supp f}, |y|\\leq f(x) \\right\\}\\subset\\Re^{d+1}.\n\\]\n\nSecond, as in the case of convex sets, the origin must be chosen in a certain way. \nThe assumption that $g$ attains its maximum at the origin is artificial and is imposed for simplicity; as we will see, it implies that all the denominators in the equations in\n\\eqref{eq:functional_glmp_intro} are positive, which is the real key condition for our theorem to hold. In fact, we will show that any point from the interior of the support of $g$ sufficiently close to the maximum can be chosen as the origin.\n\nThird, analogously to \\Href{Theorem}{thm:GLMP}, where $K$ need not be convex, our $g$ need not be log-concave. We will have an analogue of the convex hull as well, the log-concave envelope, see \\Href{Definition}{def:logenv}.\n\n\nFinally, the assumption that $f$ takes only positive values is the trickiest one! The issue is that there might be ``irregular'' contact points \n$u \\in \\cl{\\supp f} \\cap \\cl{\\supp g}$ with $f(u)=g(u)=0$ that require special attention, as we will see in \\Href{Section}{sec:boundedcontactpairs}.\n\\Href{Theorem}{thm:john_intro} side steps this problem by its assumption that $f$ is nowhere zero.\n\nTo obtain a condition for optimality in John $s$-problem \\eqref{eq:john_problem_intro} similar to the one in \\Href{Theorem}{thm:GLMP}, we define contact pairs for functions through contacts of their liftings. 
Since liftings of log-concave functions are not \nconvex in general, we need to take extra care defining normal vectors, which we \nwill do in \\Href{Section}{sec:notation}.\n\\begin{dfn}\\label{defn:contactset_intro}\nFor two functions $f,g\\colon\\Re^d\\to\\Re$, their \\emph{set of contact pairs} is defined as\t\t\\[\n\t\t\\contactsetnr{f}{g} = \n\t\t\\big\\{(\\upthing{u},\\upthing{v})\\in\\Re^{d+1}\\times\\Re^{d+1} \\colon \n\t\t \\upthing{u}=(u, f(u)), u \\in \\cl{\\supp {f}}\\cap\\cl{\\supp {g}},\\, f(u) = g(u),\n\t\t \\]\n\t\t \\[\n\t\t \\upthing{v} \\in \\nfcone{\\lifting f}{\\upthing{u}}\\cap \\nfcone{\\lifting g}{\\upthing{u}}, \n\t\t \\, \\iprod{\\upthing{v}}{\\upthing{u}}=1\\big\\},\n\t\t\\]\nwhere $\\nfcone{A}{\\upthing{u}}$ denotes the Fr\\'echet normal cone of the set \n$A\\subset\\Re^{d+1}$ at a point $\\upthing{u} \\in \\Re^{d+1},$ see \n\\Href{Definition}{def:nconus}.\n\\end{dfn}\n\nIn the following theorem, no additional assumptions are imposed on $f$ except for being proper and log-concave. On the other hand, we require $g$ to be \\emph{$q$-concave}, that is, $g^q$ is concave on its support. \n\\begin{thm}[John's condition -- $q$-concave case]\\label{thm:john_intro-concave}\nFix $s>0$. Let $f,g \\colon\\Re^d\\to[0,+\\infty)$ be two proper log-concave functions.\nLet $g = e^{-\\psi} \\colon \\Re^d \\to [0, +\\infty)$ be a function satisfying our Auxiliary Assumptions (see page~\\pageref{assumptions:intro}) and such that $g \\leq f.$ Additionally, let $g$ be $q$-concave for some $q > 0$.\nAssume that $h=g$ is a maximizer in John $s$-problem \n\\eqref{eq:john_problem_intro}.\nThen there are contact pairs \n$(\\upthing{u}_1,\\upthing{v}_1)$, $\\dots$, \n$(\\upthing{u}_m,\\upthing{v}_m)$ $\\in \\contactsetnr{f}{g}$\nand positive weights $c_1,\\ldots,c_m$ such that\n\\begin{equation}\\label{eq:functional_glmp_concave_intro}\n\t\\sum_{i=1}^{m} c_i {u}_i \\otimes {v}_i = \n\t\\mathrm{Id}_{d}, \\quad \n\t\\sum_{i=1}^{m} c_i f(u_i)\\nu_i = s\n\t\t\\quad\\text{and}\\quad\n\t\t\\sum_{i=1}^{m} c_i v_i=0,\n\\end{equation}\nwhere $\\upthing{u}_i=(u_i, f(u_i))$ and $\\upthing{v}_i=(v_i,\\nu_i)$.\nMoreover, if $g$ is radially symmetric, then condition \\eqref{eq:functional_glmp_concave_intro} is also sufficient. That is, if $g$ is radially symmetric, and there are contact pairs \n$(\\upthing{u}_1,\\upthing{v}_1)$, $\\dots$, \n$(\\upthing{u}_m,\\upthing{v}_m)$ $\\in \\contactsetnr{f}{g}$\nand positive weights $c_1,\\ldots,c_m$ satisfying \\eqref{eq:functional_glmp_concave_intro}, then \n$g$ is the unique maximizer in John $s$-problem \n\\eqref{eq:john_problem_intro}.\n\\end{thm}\n\nA dual construction to the largest volume ellipsoid contained in a convex body\nis the smallest volume ellipsoid containing a body. It is usually called the L\\\"owner ellipsoid. Notably, the necessary and sufficient conditions for the Euclidean unit ball to be this minimal ellipsoid coincide with the conditions in John's characterization of the largest volume ellipsoid. For historical precision, we remark that it was this formulation, now attributed to L\\\"owner, that John considered in the first place in \\cite{john}. In the setting of convex sets, there is hardly any difference between the two problems -- $K$ has the largest volume among all its affine images inside $L$ if and only if $L$ has the smallest volume among all its affine images containing $K$. So, \\Href{Theorem}{thm:GLMP} provides us with a necessary condition in this case as well. However, it is not the case in the functional setting! 
Let us formulate a dual functional problem and explain the issue. \n\n\\medskip \\noindent\n\\textbf{The L\\\"owner $s$-problem:} Find\n\\begin{equation}\n\\label{eq:lowner_problem_intro}\n\\min\\limits_{h \\in \\funpos{g} }\n\t\\int_{\\Re^d} h^s \n\t\\quad \\text{subject to} \\quad\n\tf \\leq h.\n\\end{equation}\n\nAs in the case of the John $s$-problem, the set of $h \\in \\funpos{g}$ satisfying $f \\leq h$ may be empty, for example, if $g$ is of compact support and the support of $f$ is the whole space $\\Re^d$. Unlike in the case of the John $s$-problem, characterizing the class of those functions $g$, for which the family $\\{h\\in\\funpos{g}\\colon h\\geq f\\}$ is not empty \\emph{for any} proper log-concave function $f$, is not straight forward. Clearly, the support of $g$ needs to be $\\Re^d$, but this condition alone is not sufficient. In order to find this class, we consider the polars of $f$ and $g$.\n\nThe \\emph{log-conjugate} (or \\emph{polar}) of a function $f\\colon\\Re^d \\to [0, +\\infty)$ is \ndefined by\n\\[\n\\loglego{f}(y) = \\inf\\limits_{x \\in \\supp{f}} \\frac{e^{-\\iprod{x}{y}}}{f(x)},\n\\]\nand is known to be a log-concave function, see \\Href{Section}{sec:notation} for details. \n\nSince $h\\geq f$ if and only if $\\loglego{h}\\leq\\loglego{f}$, it follows that the class of those log-concave functions $g$, for which the family $\\{h\\in\\funpos{g}\\colon h\\geq f\\}$ is not empty \\emph{for any} proper log-concave function $f$, is the class of log-concave functions that are polar to functions with bounded support. \n \nThe L\\\"owner $s$-problem was investigated by Li, Sch\\\"utt and Werner \\cite{LSW21} and by Ivanov and Tsiutsiurupa \\cite{ivanov2021functional} for certain special choices of $g$. We note that no John type condition of optimality was obtained. Our second main result provides it.\n\n\\begin{thm}[L\\\"owner's condition -- no zeros]\\label{thm:lowner_intro}\nFix $s>0$. \nLet $f \\colon\\Re^d\\to (0,+\\infty)$ be a proper log-concave function such that \n$\\loglego{f}$ takes only positive values. \nLet $g \\colon \\Re^d \\to [0, +\\infty)$ be a proper log-concave function such that $f \\leq g$ and $\\loglego{g}$ satisfies our Auxiliary Assumptions (see page~\\pageref{assumptions:intro}).\nSet $\\loglego{g} = e^{-\\psi}$, and assume that $h=g$ is a minimizer in L\\\"owner $s$-problem \n\\eqref{eq:lowner_problem_intro}.\nThen there are contact\npoints $u_1, \\dots, u_m \\in \\contactpoint{\\loglego{f}}{\\loglego{g}}$ \nand positive weights $c_1,\\ldots,c_m$ such that\n\\begin{equation}\\label{eq:functional_glmp_lowner_intro}\n\t\\sum_{i=1}^{m} c_i \\frac{{u}_i \\otimes \\nabla \\psi(u_i)}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}} = \n\t\\mathrm{Id}_{d}, \\quad \n\t\\sum_{i=1}^{m} \\frac{c_i}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}} = s\n\t\t\\quad\\text{and}\\quad\n\t\t\\sum_{i=1}^{m} c_i \\frac{\\loglego{g}(u_i) \\cdot \\nabla \\psi(u_i)}{1 + \\iprod{\\nabla \\psi(u_i)}{u_i}}=0.\n\\end{equation}\nMoreover, if $g$ is radially symmetric, then condition \n\\eqref{eq:functional_glmp_lowner_intro} is also sufficient. That is, if $g$ is radially symmetric, and there are contact\npoints $u_1, \\dots, u_m \\in \\contactpoint{\\loglego{f}}{\\loglego{g}}$ \nand positive weights $c_1,\\ldots,c_m$ satisfying \\eqref{eq:functional_glmp_lowner_intro}, then $g$ is a minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro}.\n\\end{thm}\n\n\n\\begin{thm}[L\\\"owner's condition -- $q$-concave case]\\label{thm:lowner_intro-concave}\nFix $s>0$. 
\nLet $f \\colon\\Re^d\\to (0,+\\infty)$ be a proper log-concave function. \nLet $g \\colon \\Re^d \\to [0, +\\infty)$ be a proper log-concave function such that $f \\leq g$ and $\\loglego{g}$ satisfies our Auxiliary Assumptions (see page~\\pageref{assumptions:intro}).\nAdditionally, assume that $\\loglego{g}$ is $q$-concave for some $q>0$. \nAssume also that $h=g$ is a minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro}. \nThen there are contact pairs \n$(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactsetnr{\\loglego{g}}{\\loglego{f}}$\nand positive weights $c_1,\\ldots,c_m$ such that\n\\begin{equation}\\label{eq:functional_glmp-lowner-conc_intro}\n\t\\sum_{i=1}^{m} c_i {v}_i \\otimes {u}_i = \\mathrm{Id}_{d}, \\quad \n\t\\sum_{i=1}^{m} c_i \\loglego{g}(u_i) \\cdot \\nu_i = s\n\t\t\\quad\\text{and}\\quad\n\t\t\\sum_{i=1}^{m} c_i\\loglego{g}(u_i) \\cdot \\nu_i u_i=0,\n\\end{equation} \nwhere $\\upthing{u}_i=(u_i, \\loglego{g}(u_i))$ and $\\upthing{v}_i=(v_i,\\nu_i)$.\nMoreover, if $g$ is radially symmetric, then condition \\eqref{eq:functional_glmp-lowner-conc_intro} is also sufficient. That is, if $g$ is radially symmetric, and there are contact pairs \n$(\\upthing{u}_1,\\upthing{v}_1)$, $\\dots$, \n$(\\upthing{u}_m,\\upthing{v}_m)$ $\\in \\contactsetnr{\\loglego{g}}{\\loglego{f}}$\nand positive weights $c_1,\\ldots,c_m$ satisfying \\eqref{eq:functional_glmp-lowner-conc_intro}, then \n$g$ is a minimizer in L\\\"owner $s$-problem \n\\eqref{eq:lowner_problem_intro}.\n\\end{thm}\n\n\\Href{Theorems}{thm:lowner_intro} and \\ref{thm:lowner_intro-concave} will be corollaries to our more general \\Href{Theorem}{thm:lowner_condition_general}.\n\n\\begin{remark}[Duality and duality]\nObserve that even though \\Href{Theorem}{thm:lowner_intro} is phrased in \nterms of $\\loglego{f}$ and $\\loglego{g}$, it is not the same as \n\\Href{Theorem}{thm:john_intro} for $\\loglego{f}$ and \n$\\loglego{g}$, even in the case where $s=1$. The reason is that we need to maximize/minimize a different \nfunctional -- the integral of the polar of the function instead of the integral \nof the function itself. In other words, the solution to L\\\"owner's problem is \nnot the dual of the solution to John's problem. \nMoreover, comparing \\eqref{eq:functional_glmp_lowner_intro} and \\eqref{eq:functional_glmp_intro}, we see that the conditions are different. See more on this in \\Href{Section}{sec:johnlownerequivalence}.\n\nThis is a major difference between our results and \\Href{Theorem}{thm:GLMP}, \nsince the latter has a self-dual form, \\cite[Theorem~3.8]{GLMP04}. \n\\end{remark}\n\n\n\n\n\\subsection{Structure of the paper}\n\n\nIn \\Href{Section}{sec:notation}, we recall the basics of the theory of \nlog-concave functions and polarity on functions.\nThen, in \\Href{Section}{sec:normalcones}, we discuss properties of normal cones \nof liftings of log-concave functions. 
These are rather technical facts, we \nsuggest skipping the proofs on a first reading.\nWe state and prove our first main result, \\Href{Theorem}{thm:john_condition_general}, the condition of \noptimality in John's problem in \\Href{Section}{sec:john}, and \nour second main result, \\Href{Theorem}{thm:lowner_condition_general}, the condition of optimality in the L\\\"owner's problem in \\Href{Section}{sec:lowner}.\n\nIn \\Href{Section}{sec:existence_uniqueness}, we show that the optima generally exist in both the John and the L\\\"owner problem, and discuss when uniqueness holds -- and when it does not.\n\n\\Href{Section}{sec:normalcone_subdifferential} describes the normal cone of the lifting of a log-concave function $e^{-\\psi}$ in terms of the subddiferential of $\\psi$. Then, in \\Href{Section}{sec:boundedcontactpairs}, more readily applicable conditions on $g$ are shown that guarantee that the very technical conditions of \\Href{Theorems}{thm:john_condition_general} and \\ref{thm:lowner_condition_general} on $f$ and $g$ hold for essentially all meaningful choice of $f$.\n\nIn \\Href{Sections}{sec:radially_symmetric} and \\ref{sec:qconcave}, we present the preliminaries needed to prove the results of the Introduction on radially symmetric and $q$-concave functions.\n\n\\Href{Section}{sec:introproofs} contains the proofs of the results of the Introduction by combining the results of Sections \\ref{sec:normalcone_subdifferential} through \\ref{sec:qconcave} to show how our two main, general results \\Href{Theorems}{thm:john_condition_general} and \\ref{thm:lowner_condition_general} apply.\n\nIn \\Href{Section}{sec:corollaries_and_discussion}, we note that \\Href{Theorem}{thm:GLMP} follows from our results, and study the relationship of the John and L\\\"owner problems. Finally, we discuss what changes need to be made if affine positions of functions are replaced by linear positions, that is, when translations in $\\Re^d$ are not allowed in the optimization problems.\n\n\n\n\n\n\n\n\\section{Basic notions}\\label{sec:notation}\n\nWe use $\\operatorname{int} K$, $\\partial K$, $\\cl{K},$ and $\\conv K$ to denote respectively the interior, boundary, closure and convex hull of a set $K$ in some Euclidean space, mostly $\\Re^d$ or $\\Re^{d+1}$. \nWe denote the Euclidean unit ball by $\\ball{d}=\\{x\\in\\Re^d\\colon |x|\\leq 1\\}$.\nWe will think of $\\Re^d$ as the linear subspace of $\\Re^{d+1}$ spanned by the first $d$ elements of the standard basis. We denote the orthogonal projection from $\\Re^{d+1}$ to $\\Re^d$ by $P_d$.\nWe use $e_{d+1}$ to denote the last vector of the standard basis of $\\Re^{d+1}.$\n\n\\subsection{Functions}\nLet $f\\colon\\Re^d\\to[0,\\infty)$ be a function.\nFor $\\upalpha\\in\\Re$, we denote its $\\upalpha$ superlevel set by\n\\[\n [f\\geq \\upalpha]=\\{x\\in\\Re^d\\colon f(x)\\geq\\upalpha\\},\n\\]\nand we use similar notations for level sets and sublevel sets of functions.\nWe denote the \\emph{support} of $f$ by \n\\[\n\\supp f = \\{x \\in \\Re^d \\colon f(x) > 0 \\}.\n\\]\nThe \\emph{essential graph} and the \\emph{lifting} of $f$ are the sets\n\\[\n\\mathrm{ess}\\ \\mathrm{graph}\\ f = \\left\\{ (x, f(x)) \\colon x \\in \\cl{\\supp f} \\right\\},\\text{ and }\n\\]\n\\[\n\\lifting{f} = \\left\\{ (x, y) \\colon x \\in \\cl{\\supp f}, |y|\\leq f(x) \\right\\}\n\\]\nin $\\Re^{d+1}$. \n\nWe call an upper semi-continuous function of finite and positive integral a \\emph{proper} function. 
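To illustrate these notions, take the ``height'' function of the upper hemisphere mentioned in the Introduction, that is, $f(x)=\\sqrt{1-\\enorm{x}^2}$ for $\\enorm{x}\\leq 1$ and $f(x)=0$ otherwise. Then $f$ is a proper log-concave function (indeed, $\\ln f(x)=\\tfrac{1}{2}\\ln(1-\\enorm{x}^2)$ is concave on the interior of $\\ball{d}$), $\\cl{\\supp f}=\\ball{d}$, and\n\\[\n\\lifting{f}=\\left\\{(x,y)\\in\\Re^{d+1}\\colon \\enorm{x}^2+y^2\\leq 1\\right\\}=\\ball{d+1},\n\\qquad\n\\mathrm{ess}\\ \\mathrm{graph}\\ f=\\left\\{(x,y)\\in\\partial\\ball{d+1}\\colon y\\geq 0\\right\\},\n\\]\nthat is, the lifting is the Euclidean unit ball of $\\Re^{d+1}$ and the essential graph is its closed upper hemisphere.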
\nNote that for a proper log-concave function $f$, we have that $\\lifting{f}$ is compact if and only if, $\\cl{\\supp f}$ is compact, which is equivalent to $f$ having bounded support.\nA special class of log-concave functions is \\emph{$q$-concave functions}\\label{page:qconcave} for some $q>0$, which is those $f$ for which $f^q$ is concave on its support. It is an exercise to show that if $f$ is $q$-concave and $0< r \\leq q$, then $f$ is $r$-concave as well. \n\nThe \\emph{effective domain} of a convex function $\\psi \\colon \\Re^d \\to {\\mathbb R} \\cup \\{+ \\infty\\}$ is the set \n\\[\n\\dom \\psi = \\left\\{x \\colon \\psi(x) < + \\infty\\right\\},\n\\]\nthe \\emph{epigraph} of $\\psi$ is\n\\[\n\\operatorname{epi} \\psi = \\left\\{(x, \\xi) \\colon x\\in \\dom \\psi, \\; \\xi \\in {\\mathbb R}, \\; \\xi \\geq \\psi(x) \\right\\}.\n\\]\nNote that if $f=e^{-\\psi}$ is a proper log-concave function, then $\\psi$ is a lower semi-continuous convex function whose epigraph is closed, and\n\\begin{equation}\\label{eq:suppequalsdom}\n \\supp f =\\dom \\psi \n\\end{equation}\nare convex sets in $\\Re^d$ with non-empty interior.\n\n\n\\begin{dfn}\\label{def:logenv}\nFor a function $g \\colon \\Re^d \\to [0, + \\infty)$, its \\emph{log-concave envelope}, $\\logconc{g}$, is the minimal upper semi-continuous log-concave function $h$ satisfying $g \\leq h$. \n\\end{dfn}\nWe note that for any function $g$, the log-concave envelope $\\logconc{g}$ is well-defined.\nThe epigraph of $-\\ln(\\logconc{g})$ is the closure of the convex hull of the epigraph of $-\\ln(g)$ in $\\Re^{d+1}$.\n\n\n\n\n\\subsection{Positions, Minkowski's determinant inequality}\nWe will work with positions of functions that are the analogues of affine images of convex bodies.\nTo this end, we define the vector space\n \\[\n \\mathcal{W}=\\{(A\\oplus\\upalpha, a) \\colon A\\in\\Re^{d\\times d}, \\upalpha\\in\\Re \\text{ and } a\\in\\Re^d\\},\n \\]\nand its subsets\n \\[\n \\mathcal{M}=\\{(A\\oplus\\upalpha, a)\\in\\mathcal{W} \\colon A \\text{ is non-singular, and } \\upalpha>0\\},\n \\]\nand\n \\[\n \\mathcal{M}^+=\\{(A\\oplus\\upalpha, a)\\in\\mathcal{M} \\colon A \\text{ is positive definite}\\}.\n \\]\nWe will refer to elements of $\\mathcal{W}$ as \\emph{extended contact operators}, and\nwe will say that $A \\oplus \\upalpha$ is the \\emph{operator part} and \n$a$ is the \\emph{translation part} of $(A \\oplus \\upalpha, a) \\in \\mathcal{W}.$ \n \nWe denote by\n\\[\\funpos{g}=\\{\\upalpha g(Ax+a)\\colon (A\\oplus\\upalpha, a)\\in\\mathcal{M}\\}\\]\nthe \\emph{positions} of a function $g$ on $\\Re^d$, and by\n\\[\\funppos{g}=\\{\\upalpha g(Ax+a)\\colon (A\\oplus\\upalpha, a)\\in\\mathcal{M}^+\\}.\\]\nthe \\emph{positive positions} of $g$.\n\nWe recall the additive and the multiplicative forms of the\n\\emph{Minkowski determinant inequality}. Let $A$ and $B$ be \npositive definite matrices of order $d$. 
Then, for any $\\lambda \\in (0,1),$\n\\begin{equation}\n\\label{eq:minkowski_det_ineq}\n\\left(\\det\\left( \\lambda A + (1 - \\lambda)B\\right)\\right)^{1/d} \\geq\n\\lambda \\left(\\det A\\right)^{1/d} + \n(1 -\\lambda)\\left(\\det B\\right)^{1/d},\n\\end{equation}\nwith equality if and only if $A = cB$ for some $c > 0;$ and \n\\begin{equation}\n\\label{eq:minkowski_det_multipl_ineq}\n\\det\\left( \\lambda A + (1 - \\lambda)B\\right) \\geq\n\\left(\\det A\\right)^{\\lambda} \\cdot \\left(\\det B\\right)^{1 -\\lambda},\n\\end{equation}\nwith equality if and only if $A = B$.\n\n\n\n\\subsection{Polarity for sets and functions}\\label{sec:polarity}\nRecall that the polar of a set \n $K$ in $\\Re^d$ is given by\n \\[\n {K}^\\circ = \\{y \\in \\Re^d \\colon \\iprod{x}{y} \\leq 1\n \\quad \\text{for all } \\ x \\in K \\}.\n \\]\nFor any cone $C$ with apex at the origin of a linear space $L$, we call its polar set the \\emph{polar cone}. It is easy to see that\n\\[\nC^\\circ = \\left\\{p \\in L^{\\ast} \\colon \\iprod{p}{a} \\leq 0 \\quad \\text{ for all } a \\in C\\right\\}.\n\\]\n\nThe classical \\emph{convex conjugate} transform (or \\emph{Legendre transform})\n$\\legendre$ is defined for a function $\\varphi: \\Re^d \\to {\\mathbb R}\\cup \\{+\\infty\\}$ by\n\\[\n\\slogleg[]{\\varphi}(y) = \\sup\\limits_{x \\in \\Re^d} \\{\\iprod{x}{y} - \\varphi(x)\\}.\n\\]\nThis notion yields the following duality mapping on the set of log-concave functions, justified in~\\cite{artstein2007characterization, artstein2008concept, artstein2009concept}.\nThe \\emph{log-conjugate} (or \\emph{polar}) of a log-concave function $f = e^{-\\psi} : \\Re^d \\to [0, +\\infty)$, is defined by\n\\[\n\\loglego{f}(y) = e^{- (\\slogleg[] \\psi)(y)} = \n\\inf\\limits_{x \\in \\supp{f}} \\frac{e^{-\\iprod{x}{y}}}{f(x)}.\n\\]\n\n\n\\subsection{The normal cone}\\label{sec:normalcone}\n\nFor a set $S$ in a linear space, we define its \\emph{positive cone} \n(or \\emph{cone hull}) by\n\\[\n\\poscone{S} = \\left\\{\\lambda a \\colon \\lambda > 0,\\ a \\in \\conv S \\right\\}.\n\\]\n\n\\begin{dfn}\\label{def:nconus}\nThe {\\emph{Frech\\'et normal cone}} (in short, the \\emph{normal cone}) to a set \n$A \\subset \\Re^d$ at a point $a_0\\in A$ is the set of vectors $v\\in\\Re^d$ such \nthat for any $\\varepsilon > 0$, there is $\\delta>0$ such that\n $\\iprod{v}{a - a_0} \\leq \\varepsilon {\\enorm{a - a_0}} \\text{ for all } \n a\\in A\\cap \\parenth{ \\delta\\ball{d} + a_0}$. In short,\n\\[\n\\nfcone{A}{a_0} = \\left\\{ v \\in \\Re^d \\colon \n\\iprod{v}{a - a_0} \\leq \\littleo{\\enorm{a - a_0}} \\text{ for all }a\\in A \\right\\}.\n\\]\nClearly, $\\nfcone{A}{a_0}$ is a closed convex cone in $\\Re^d$.\n\\end{dfn}\n\nIt is easy to see that the normal cone to a convex set $K$ in $\\Re^d$ at a boundary point $a_0\\in\\partial K$ coincides with the usual normal cone, that is,\n\\begin{equation}\\label{eq:normal_cone_dual_to_poscone}\n\\nfcone{K}{a_0}=\n\\left\\{ v \\in \\Re^d \\colon \n\\iprod{v}{a - a_0} \\leq 0 \\text{ for all } a\\in K\\right\\}\n = \\parenth{\\poscone{K - a_0}}^\\circ. \n\\end{equation}\n\nMostly, we will consider the normal cone of the lifting of an upper semi-continuous log-concave function $f$. 
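As a simple illustration of this notion at a point where the lifting is not convex, let $d=1$ and $f(x)=e^{-|x|}$, so that $\\psi(x)=-\\ln f(x)=|x|$, and consider the point $\\upthing{u}=(0,1)$ of $\\mathrm{ess}\\ \\mathrm{graph}\\ f$. Near $\\upthing{u}$, the lifting coincides with the region $\\{(x,y)\\colon y\\leq e^{-|x|}\\}$, and a direct computation from \\Href{Definition}{def:nconus} gives\n\\[\n\\nfcone{\\lifting{f}}{(0,1)}=\\left\\{(v,\\nu)\\in\\Re^2\\colon \\nu\\geq |v|\\right\\},\n\\]\na two-dimensional closed convex cone rather than a single ray (which is what one obtains at points where $f$ is positive and differentiable). Note also that $\\iprod{\\upthing{v}}{\\upthing{u}}=\\nu>0$ for every non-zero $\\upthing{v}$ in this cone.\n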
In particular, we will show in the next section that $\\nfcone{\\lifting{f}}{\\upthing{u}}$ is not empty at any \n$\\upthing{u}\\in\\mathrm{ess}\\ \\mathrm{graph}\\ {f}$.\n\n\n\n\n\n\n\n\n\n\n\n\\subsection{Contact pairs}\nAn important technical step in our analysis is to consider most but not all contact pairs of $f$ and $g$.\n\\begin{dfn}\\label{defn:contactset}\nFor two functions $f,g\\colon\\Re^d\\to\\Re$, we call the set\n\t\t\\[\\contactset{f}{g} = \n\t\t\\big\\{(\\upthing{u},\\upthing{v})\\in \\contactsetnr{f}{g} \\colon f(u)\\neq 0 \\big\\} \\cup\n\t\t\\big\\{(\\upthing{u},\\upthing{v})\\in \\contactsetnr{f}{g} \\colon f(u)=g(u)=0, \\upthing{v}=(v, 0) \\big\\}.\n\t\t\\]\ntheir \\emph{reduced set of contact pairs}, where\n\t\t\\[\\contactsetnr{f}{g} = \n\t\t\\big\\{(\\upthing{u},\\upthing{v})\\in\\Re^{d+1}\\times\\Re^{d+1} \\colon \n\t\t \\upthing{u}=(u, f(u)), u \\in \\cl{\\supp {f}}\\cap\\cl{\\supp {g}},\\, f(u) = g(u),\n\t\t \\]\n\t\t \\[\n\t\t \\upthing{v} \\in \\nfcone{\\lifting f}{\\upthing{u}}\\cap \\nfcone{\\lifting g}{\\upthing{u}}, \n\t\t \\, \\iprod{\\upthing{v}}{\\upthing{u}}=1\\big\\},\n\t\t\\]\nas given in \\Href{Definition}{defn:contactset_intro}.\n\\end{dfn}\n\nThe difference between $\\contactsetnr{f}{g}$\n and $\\contactset{f}{g}$ is that in the latter, we exclude outer normals with non-zero last coordinate at contact points at which both functions vanish.\n\n\nNote that if $g\\leq f$ and $u\\in\\Re^d$ is such that $f(u)=g(u)$, then \n$\\nfcone{\\lifting{f}}{\\upthing{u}}\\subseteq\\nfcone{\\lifting{g}}{\\upthing{u}}$, \nwhere $\\upthing{u}=(u,f(u))$.\nThus, if $g \\leq f$, then one has\n\t\t\\[\n\t\t\\contactsetnr{f}{g} = \n\t\t\\big\\{(\\upthing{u},\\upthing{v})\\in\\Re^{d+1}\\times\\Re^{d+1} \\colon \n\t\t \\upthing{u}=(u, f(u)) \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f} \\cap \\mathrm{ess}\\ \\mathrm{graph}\\ {g},\n\t\t \\]\n\t\t \\[\n\t\t \\upthing{v} \\in \\nfcone{\\lifting f}{\\upthing{u}}, \\quad\n\t\t \\, \\iprod{\\upthing{v}}{\\upthing{u}}=1\\big\\}.\n\t\t\\]\n\n\nAlso, we will need to ensure that for any point $\\upthing{u} \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f}$ and any \n$\\upthing{v} \\in \\nfcone{\\lifting{f}}{\\upthing{u}},$ the angle between $\\upthing{u}$ and $\\upthing{v}$ is acute, that is, $\\iprod{\\upthing{u}}{\\upthing{v}} >0$.\nIn the case of convex sets, this condition easily follows from the assumption that the origin is in the interior of the set. In our functional case, a bit more care is needed. \n\\begin{dfn}\\label{dfn:weaklustarlike}\nLet $f \\colon \\Re^d \\to [0, + \\infty)$ be a function. \nWe say that a set ${U} \\subset \\Re^d$ is a \\emph{star-like set with respect to~}$f$, if \nfor every $ u \\in U \\cap \\supp f$, we have $\\iprod{(u,f(u))}{\\upthing{v}} > 0$ for all non-zero \n$\\upthing{v}\\in\\nfcone{\\lifting{f}}{(u, f(u))}.$ \n\\end{dfn}\n\nSince the Fr\\'echet normal cone is always closed, we immediately have the following.\n\\begin{lem}\\label{lem:locstar_geom_meaning}\nLet $f \\colon \\Re^d \\to [0, + \\infty)$ be a proper log-concave function, and $u \\in \\supp {f}$. Set $\\upthing{u}= (u, f(u))$, and assume that $\\{u\\}$ is a star-like set with respect to~$f$. 
Then \n\\[\n\\nfcone{\\lifting{f}}{\\upthing{u}} = \\poscone{\\upthing{v} \\in \\nfcone{\\lifting{f}}{\\upthing{u}} \\colon \\iprod{\\upthing{v}}{\\upthing{u}} = 1}.\n\\]\n\\end{lem}\n\n\n\\section{The normal cone of the lifting}\\label{sec:normalcones}\nIn this section, we collect several technical statements about normal cones, which will be used in \nthe proofs of the main results. The proofs of the statements are based mostly on standard methods of real and convex analysis. This section is self-contained, that is, no proof herein relies on any out-of-section statement.\nIt may be best to skip these proofs on a first reading.\n\n\nWe recall that the \\emph{Hausdorff distance} between two compact subsets $K$ and $L$ of $\\Re^d$\nis defined by \n\\[\n\\hausmetric{K}{L} = \\inf \\left\\{\\lambda > 0 \\colon K \\subset L + \\lambda \\ball{d};\n\\quad L \\subset K + \\lambda \\ball{d} \\right\\}.\n\\]\n\n\\begin{prop}[Hemicontinuity of the normal cone for convex sets]\\label{prop:normalconesemicont}\nLet $\\{K_i\\}_{i \\in {\\mathbb N}}$ be a sequence of bounded convex sets in $\\Re^d$ converging in the Hausdorff distance to a convex set $K$, and let $\\{u_i\\}_{i \\in {\\mathbb N}}$ be a sequence of points with $u_i \\in {K_i}$ converging to a point $u\\in \\partial{K}$. \nLet $\\{v_i\\}_{i \\in {\\mathbb N}}$ be a sequence of outer normals $v_i\\in\\nfcone{K_i}{u_i}$ converging to a unit vector $v\\in\\Re^d$.\nThen $v\\in\\nfcone{K}{u}$.\n\\end{prop}\n\\begin{proof}\nAccording to equation (2.3) of \\cite{schneider2014convex}, \n\\[\n \\nfcone{K}{u}=p_K^{-1}(u)-u\n\\]\nfor any $u\\in K$, where $p_K:\\Re^d\\to K$ is the \\emph{metric projection} onto $K$, that is, $p_K(x)$ is the unique point of $K$ that is closest to $x$.\nFurthermore, Lemma~1.8.11 of \\cite{schneider2014convex} and the discussion preceding it state that the mapping $(K,x)\\mapsto p_K(x)$ is continuous in both arguments.\n\nSuppose for a contradiction that $v$ is not in $\\nfcone{K}{u}$, that is, \n$p_{K}(u + v)\\neq u$. Then, by the continuity of $(K,x)\\mapsto p_K(x)$ in the second variable, there is a neighborhood $U$ of $u$, and a neighborhood $V$ of $u + v$ such that $p_{K}(V)\\cap U = \\emptyset$.\nIn turn, by the continuity of $(K,x)\\mapsto p_K(x)$ in the first variable, there is a neighborhood $U^{\\prime}$ of $u$ inside $U$ such that $p_{K_i}(V)\\cap U^{\\prime}=\\emptyset$ for all sufficiently large $i \\in {\\mathbb N}$. This contradicts the fact that, for all sufficiently large $i$, we have $u_i + v_i \\in V$ and $p_{K_i}(u_i + v_i) = u_i \\in U^{\\prime}$.\n\\end{proof}\n\n\nBy the symmetry of $\\lifting{f}$ about $\\Re^d$ and the convexity of $\\supp{f}$, one obtains the following.\n\\begin{lem}[The normal cone of $\\lifting{f}$ at the boundary of $\\supp{f}$]\\label{lem:nfcone_at_zero}\nLet $f \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function\nand let $u \\in \\partial{\\supp f}$. Set $\\upthing{u} = (u, f(u))$. Then \n\\[\nP_d \\parenth{\\nfcone{\\lifting{f}}{\\upthing{u}}} = \n\\nfcone{\\lifting{f}}{\\upthing{u}} \\cap \\Re^d = \n\\nfcone{\\supp{f}}{u},\n\\]\nwhere the last normal cone is considered in $\\Re^d$, and $P_d:\\Re^{d+1}\\to\\Re^d$ denotes the orthogonal projection onto the first $d$ coordinates.\n\\end{lem}\n\n\n\nThe following simple lemma locally describes the normal cone of $\\lifting{f}$ in terms of the normal cone of $\\operatorname{epi}(-\\ln f)$.\n\n\\begin{lem}[The normal cone of $\\lifting{f}$ in $\\supp{f}$]\\label{lem:nfcone}\nLet $f= e^{-\\psi} \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function. 
\nFix a point $u \\in \\supp f$, a scalar $\\nu \\in {\\mathbb R}$, and set $\\upthing{u} = (u, f(u))$.\nThen \n\\begin{align}\\label{eq:normalconecomaprison}\n \\parenth{v,\\nu}\\in\\nfcone{\\operatorname{epi}\\psi}{(u,\\psi(u))} \\text{ if and only if }\n \\parenth{v,\\frac{-\\nu}{f(u)}}\\in\\nfcone{\\lifting{f}}{(u,f(u))}.\n \\end{align}\n Furthermore,\n \\begin{equation}\\label{eq:normalconelevelset}\n P_d\\left(\\nfcone{\\lifting{f}}{(u,f(u))}\\right)\\subseteq \\nfcone{[f\\geq f(u)]}{u}, \n \\end{equation}\nwhere $P_d:\\Re^{d+1}\\to\\Re^d$ denotes the orthogonal projection onto the first $d$ coordinates.\n\\end{lem}\n\\begin{proof}\nClearly, $\\nu$ cannot be positive. \nIf $\\nu=0$, then by \\Href{Lemma}{lem:nfcone_at_zero},\nthe leftmost inclusion is equivalent to $v \\in \\nfcone{\\dom \\psi}{u}$,\nand the rightmost is equivalent to $v \\in \\nfcone{\\supp f}{u},$ which are \nequivalent by \\eqref{eq:suppequalsdom}. \n\n\n\n\n\nWe thus assume that $\\nu<0$. Since $\\psi$ is a convex function, \n$\\parenth{v,\\nu}\\in\\nfcone{\\operatorname{epi}\\psi}{(u,\\psi(u))}$ holds if and only if\n\\begin{equation*}\n \\psi(x)\\geq\\psi(u)+\\iprod{u-x}{\\frac{v}{\\nu}} \n\\end{equation*}\nfor all $x \\in {\\mathbb R}^d$, which yields by exponentiation\n\\[\n f(x)= e^{-\\psi(x)} \\leq f(u)e^{\\iprod{x-u}{\\frac{v}{\\nu}}}=\n f(u)\\left[1+\\iprod{x-u}{\\frac{v}{\\nu}}\\right]+\\littleo{|x-u|}.\n\\]\nThe latter, by the definition of the normal cone, is equivalent to\n\\begin{equation}\\label{eq:vnuinnfcone}\n \\parenth{v,\\frac{-\\nu}{f(u)}}\\in\\nfcone{\\lifting{f}}{(u,f(u))}, \n\\end{equation}\ncompleting the proof of one implication in \\eqref{eq:normalconecomaprison}. \n\nFor the other direction, assume \\eqref{eq:vnuinnfcone}, that is,\n\\[\n f(x)\\leq f(u)\\left[1+\\iprod{x-u}{\\frac{v}{\\nu}}\\right]+\\littleo{|x-u|}.\n\\]\nSince $1+t\\leq e^{t}$ for every $t\\in\\Re$ and $f(u)>0$, by \ntaking logarithms, we obtain \n$\\psi(x)\\geq\\psi(u)+\\iprod{u-x}{\\frac{v}{\\nu}}+\\littleo{|x-u|}$, and hence, \n$\\parenth{v,\\nu}\\in\\nfcone{\\operatorname{epi}\\psi}{(u,\\psi(u))}$.\n\nEquation \\eqref{eq:normalconelevelset} is a direct consequence of the definition of the normal cone and can be easily shown to hold for any function without the assumption of log-concavity. The proof of \\Href{Lemma}{lem:nfcone} is complete.\n\\end{proof}\n\n\n\nSince the normal cone to a convex subset of ${\\mathbb R}^d$ at \nany point of its boundary contains non-zero vectors, the two previous lemmas yield the following.\n\\begin{cor}[The normal cone is not empty]\\label{cor:normalcone_nonempty}\nLet $f \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function.\nThe normal cone to $\\lifting{f}$ at any point of \n$\\mathrm{ess}\\ \\mathrm{graph}\\ {f}$ contains non-zero vectors.\n\\end{cor}\n\n\\begin{lem}[Regularity of the normal cone of $\\lifting{f}$]\\label{lem:nfconeregularity}\nLet $f \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function. 
\nFix $\\varepsilon>0$ and a point $u \\in \\partial{\\supp f}$ with $f(u)=0$.\nThen there is a $\\delta>0$ such that for every \n${u}_1 \\in \\partial{\\supp{f}} \\cap \\parenth{\\delta\\ball{d} +u}$ and every \n $\\parenth{v_1,\\nu_1}\\in\\nfcone{\\lifting{f}}{({u}_1, f(u_1))}$, there is a\n $v\\in\\nfcone{\\cl{\\supp f}}{u}$ with $\\iprod{\\frac{v_1}{\\enorm{v_1}}}{\\frac{v}{\\enorm{v}}}>1-\\varepsilon$.\n \\end{lem}\n\\begin{proof}\nObserve that if \n$\\parenth{v_1,\\nu_1}\\in\\nfcone{\\lifting{f}}{(u_1, f(u_1))}$ for some point \n$u_1\\in\\cl{\\supp{f}}$, then $v_1\\in\\nfcone{[f\\geq f(u_1)]}{u_1}$. Thus, if $\\delta$ is sufficiently small, then $f(u_1)$ is close to zero, and hence, $[f\\geq f(u_1)] \\cap\\parenth{\\ball{d} + u}$ is close to $\\cl{\\supp{f}} \\cap \\parenth {\\ball{d} + u}.$ \nBy applying \\Href{Proposition}{prop:normalconesemicont}, we complete the proof.\n\\end{proof}\n\n\n\n\\Href{Lemma}{lem:nfcone} gives a description of the normal cone of $\\lifting{f}$ which yields the following \nlocal description of $\\lifting{f}$ in terms of curves.\n\n\\begin{lem}[Local description of $\\lifting{f}$ at a non-zero point in terms of a curve]\\label{lem:curve_in_the_lifting}\nLet $f= e^{-\\psi} \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function. \nLet $u \\in {\\supp f}$ and set $\\upthing{u} = (u, f(u))$. \nLet $\\upthing{\\xi}(t) \\colon [0,1] \\to {\\mathbb R}^{d+1}$ be a curve such that\n$\\upthing{\\xi}(0) = (u, f(u))$ and the right derivative $\\upthing{\\xi}^{\\prime}(0)$ at zero exists.\nConsider the following statements:\n\\begin{enumerate}[(a)]\n\\item\\label{item:curve_linearized_in_primal}\n\\[\\upthing{\\xi}^{\\prime}(0) \\in \n\\operatorname{int} \\parenth{\\nfcone{\\lifting{f}}{\\upthing{\\xi}(0) }}^{\\circ}.\n\\]\n\\item\\label{item:curve_inclusion} There is a positive $\\varepsilon$ such that \n\\[\n\\upthing{\\xi}(t)\\in \\lifting{f}\n\\;\\;\\;\\text{ for all } t \\in [0, \\varepsilon].\n\\] \n\\item\\label{item:curve_linearized_in_primalweak} \n\\[\n\\upthing{\\xi}^{\\prime}(0)\\in \n \\parenth{\\nfcone{\\lifting{f}}{\\upthing{\\xi}(0) }}^{\\circ}.\n\\]\n\\end{enumerate}\n Then \\eqref{item:curve_linearized_in_primal} implies \\eqref{item:curve_inclusion}, and \\eqref{item:curve_inclusion} implies \\eqref{item:curve_linearized_in_primalweak}.\n\\end{lem}\n\n\\begin{proof}\nWith the identification $\\Re^{d+1}=\\Re^d\\oplus\\Re$, we split the coordinates of $\\upthing{\\xi}(t)$ as $\\upthing{\\xi}(t)= \\parenth{\\xi(t),\\mu(t)}$ that is, $\\xi\\colon[0,1]\\to\\Re^d$ and $\\mu\\colon[0,1]\\to\\Re$ with $\\xi(0)=u$ and $\\mu(0)=f(u)$.\n\nClearly, if $\\lifting{f}$ was a convex set in $\\Re^{d+1}$, then the statement would follow from basic properties of supporting hyperplanes to convex sets. We will use the fact that even though $\\lifting{f}$ is not convex in general, but the epigraph of $\\psi$ is. The proof relies on translating the question on the behavior of $\\upthing{\\xi}$ with respect to $\\lifting{f}$ to a question concerning the behavior of the curve\n \\[\n \\upthing{\\eta}(t)=\\parenth{\\xi(t),-\\log \\mu(t)}\n \\]\nwith respect to $\\operatorname{epi}{\\psi}$.\n \nSince $f$ is upper semi-continuous and log-concave, $\\operatorname{epi}{\\psi}$ is a closed convex set in $\\Re^{d+1}$ with nonempty interior. 
It follows that \n\\begin{equation}\\label{eq:curve_linearized_in_primal_eta}\n \\upthing{\\eta}^{\\prime}(0)\\in\n \\operatorname{int} \\parenth{\\nfcone{\\operatorname{epi}\\psi}{\\upthing{\\eta}(0) }}^{\\circ}\n\\end{equation}\nimplies\n\\begin{equation}\\label{eq:curve_inclusion_eta}\n \\upthing{\\eta}(t)\\in\\operatorname{epi}{\\psi}\\text{ for all }t\\in[0,\\varepsilon], \\text{ with some }\\varepsilon>0,\n\\end{equation}\nwhich, in turn, implies\n\\begin{equation}\\label{eq:curve_linearized_in_primalweak_eta}\n \\upthing{\\eta}^{\\prime}(0)\\in\n \\parenth{\\nfcone{\\operatorname{epi}\\psi}{\\upthing{\\eta}(0) }}^{\\circ}.\n\\end{equation}\n\nClearly, statement \\eqref{item:curve_inclusion} of the lemma is equivalent to \n\\eqref{eq:curve_inclusion_eta}, thus, in order to prove the lemma, we need to \nshow the equivalence of inclusions \\eqref{item:curve_linearized_in_primal} and \n\\eqref{eq:curve_linearized_in_primal_eta}, and the equivalence of inclusions \n\\eqref{item:curve_linearized_in_primalweak} and \n\\eqref{eq:curve_linearized_in_primalweak_eta}.\n\n\nUsing\n$\\upthing{\\eta}^{\\prime}(0)=\\parenth{\\xi^{\\prime}(0), \\frac{-\\mu^{\\prime}(0)}{f(u)}}$\nand the definition of the polar cone, we have\n\\[\n \\upthing{\\eta}^{\\prime}(0)\\in\n \\parenth{\\nfcone{\\operatorname{epi}{\\psi}}{\\upthing{\\eta}(0) }}^{\\circ} \\Longleftrightarrow\n\\iprod{\\parenth{\\xi^{\\prime}(0), \\frac{-\\mu^{\\prime}(0)}{f(u)}}}{\\parenth{v,\\nu}}\\leq 0\n\\text{ for all }\\parenth{v,\\nu}\\in\\nfcone{\\operatorname{epi}{\\psi}}{\\upthing{\\eta}(0)}.\n\\]\nThe latter, by \\eqref{eq:normalconecomaprison}, is equivalent to\n\\[ \n\\iprod{\\parenth{\\xi^{\\prime}(0), \\mu^{\\prime}(0)}}{\\parenth{v,\\frac{-\\nu}{f(u)}}}\\leq 0\n\\text{ for all }\\parenth{v,\\frac{-\\nu}{f(u)}}\\in\\nfcone{\\lifting{f}}{\\upthing{\\xi}(0)},\n\\]\nwhich, in turn, is equivalent to \n\\[\n \\upthing{\\xi}^{\\prime}(0)\\in \n \\parenth{\\nfcone{\\lifting{f}}{\\upthing{\\xi}(0) }}^{\\circ}.\n\\]\n\nIn summary, \\eqref{eq:curve_linearized_in_primalweak_eta} is equivalent to statement \\eqref{item:curve_linearized_in_primalweak} of the lemma.\nThe equivalence of \\eqref{eq:curve_linearized_in_primal_eta} and\nstatement \\eqref{item:curve_linearized_in_primal} is shown the same way.\n\\end{proof}\n\n\n\nSince $\\cl{\\supp{f}}=\\lifting{f}\\cap\\Re^d$ is a closed convex set in $\\Re^d$, we have the following statement.\n\\begin{lem}[Local description of $\\lifting{f}$ at a zero point in terms of a horizontal curve]\\label{lem:flat_curve_in_the_lifting}\nLet $f \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous log-concave function. \nLet $u \\in \\cl{\\supp f}$ such that $f(u)= 0.$ Denote $\\upthing{u} = (u, f(u))=(u,0)$. 
\nLet $\\upthing{\\xi}(t) \\colon [0,1] \\to \\Re^{d+1}$ be a curve such that\n$\\upthing{\\xi}(0) = (u, 0)$ and the right derivative $\\upthing{\\xi}^{\\prime}(0)$ at zero exists.\nConsider the following statements:\n\\begin{enumerate}[(a)]\n\\item\\label{item:flat_curve_linearized_in_primal}\n\\[\\upthing{\\xi}^{\\prime}(0) \\in \n\\operatorname{int} \\parenth{\\nfcone{\\supp{f}}{\\upthing{\\xi}(0) }}^{\\circ}.\n\\]\n\\item\\label{item:flat_curve_inclusion} There is a positive $\\varepsilon$ such that \n\\[\n\\upthing{\\xi}(t)\\in \\lifting{f}\n\\;\\;\\;\\text{ for all } t \\in [0, \\varepsilon].\n\\] \n\\item\\label{item:flat_curve_linearized_in_primalweak} \n\\[\n\\upthing{\\xi}^{\\prime}(0)\\in \n \\parenth{\\nfcone{\\supp{f}}{\\upthing{\\xi}(0) }}^{\\circ}.\n\\]\n\\end{enumerate}\nThen the implications \\eqref{item:flat_curve_linearized_in_primal} $\\Rightarrow$ \\eqref{item:flat_curve_inclusion} $\\Rightarrow$ \\eqref{item:flat_curve_linearized_in_primalweak} hold, where polarity is meant in $\\Re^d$.\n\\end{lem}\n\n\n\n\n\n\n\n\n\\section{John's problem}\\label{sec:john}\n\nFix $s > 0$, and two functions $f, g \\colon \\Re^d \\to [0, \\infty)$. \nIn addition to John $s$-problem \\eqref{eq:john_problem_intro}, we will consider the following optimization problem. \n\\medskip\n\n\\textbf{Positive position John $s$-problem for $f$ and $g$:} Find\n\\begin{equation}\\label{eq:john_problem_pos}\n\\max\\limits_{h \\in \\funppos{g} }\n\t\\int_{\\Re^d} h^s \n\t\\quad \\text{subject to} \\quad\n\th \\leq f.\n\\end{equation}\n\\medskip \n\nWe say that $g$ is a \\emph{global maximizer} in the (Positive position) John $s$-problem, if \nfor any $(A\\oplus\\upalpha,a)$ in $\\mathcal{M}$ (resp., in $\\mathcal{M}^+$), we have that $\\int_{\\Re^d} h^s \\leq \\int_{\\Re^d} g^s$ whenever $h\\leq f$ and $h(x)=\\upalpha g(Ax+a)$. \nOn the other hand, $g$ is a \\emph{local maximizer} in the (Positive position) John $s$-problem, if there is a neighborhood $\\mathcal U$ of $( \\mathrm{Id}_d\\oplus 1, 0)$ in $\\mathcal{M}$ (resp., in $\\mathcal{M}^+$) such that for any $(A\\oplus\\upalpha,a)\\in\\mathcal U$, we have that $\\int_{\\Re^d} h^s \\leq \\int_{\\Re^d} g^s$ whenever $h\\leq f$ and $h(x)=\\upalpha g(Ax+a)$. \n\n\n\n\n\n\n\n\nWe are ready to state our first main result, a general version of \n\\Href{Theorem}{thm:john_intro}. The assumptions of the theorem are quite \ntechnical; they will be explained in \n\\Href{Section}{sec:corollaries_and_discussion}, where we discuss natural \nsituations when they hold.\n\n\n\n\n\\begin{thm}[John's condition]\\label{thm:john_condition_general}\nFix $s > 0$. Let $f\\colon\\Re^d \\to [0,+\\infty)$ be a proper log-concave function,\nand let $g_b \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous function \nsuch that \n\\begin{itemize}\n\\item $\\logconc{g_b}$ satisfies our Basic Assumptions (see page~\\pageref{assumptions:basic}); \n\\item the set of contact points $\\contactpoint{f}{g_b}$ is a star-like set with respect to~${f}$;\n\\item the reduced set of contact pairs $\\contactset{f}{g_b}$ is bounded. 
\n\\end{itemize} \nThen setting $g =\\logconc{g_b}$, the following hold:\n\t\\begin{enumerate}\n\t\t\\item\\label{item:local-maximum-implies-glmp}\n\t\t\tIf $h=g$ is a local maximizer in John $s$-problem \\eqref{eq:john_problem_intro} for $f$ and $g$,\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\upthing{u}_1,\\upthing{v}_1)$, $\\dots$, \n\t\t\t$(\\upthing{u}_m,\\upthing{v}_m)$ $\\in \\contactset{f}{g_b}$\n\t\t\tand positive weights $c_1,\\dots,c_m$ such that\n\t\t\\begin{equation}\\label{eq:functional_glmp}\n\t\t\\sum_{i=1}^{m} c_i {u}_i \\otimes {v}_i = \n\t\t \\mathrm{Id}_{d}, \\quad \n\t\t \\sum_{i=1}^{m} c_i f(u_i)\\nu_i = s\n\t\t\t\t\t\\quad\\text{and}\\quad\n\t\t\t\t\t\\sum_{i=1}^{m} c_i v_i=0,\n\t\t\t\t\\end{equation}\n where $\\upthing{u}_i=(u_i, f(u_i))$ and $\\upthing{v}_i=(v_i,\\nu_i)$.\n\t\t\\item\\label{item:glmp-implies-global-maximum}\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp},\n\t\t\tthen $g$ is a global maximizer in Positive position John $s$-problem \\eqref{eq:john_problem_pos} for $f$ and $g$.\n\t\\end{enumerate}\n\\end{thm}\n\nWe emphasize that in this result, we maximize the integral of the log-concave \nenvelope of a given function $g_b$, \nbut we use the contact pairs of $f$ and $g_b$. It allows us to consider the case when the integral of $g_b$ is zero, for example, if $g_b$ has a finite number of non-zero values. A similar results for not necessarily log-concave function $g_b$ immediately follows from \\Href{Theorem}{thm:john_condition_general}, because a position of $g$ is below $f$ if and only if the corresponding position of $g_b$ is below $f$ by the log-concavity of $f$.\n\\begin{cor}\n Let the functions $f,g_b \\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}. In addition, let the integral of $g_b$ be positive. 
Then \n \t\\begin{enumerate}\n\t\t\\item \n\t\t\tIf $h=g_b$ is a local maximizer in John $s$-problem \\eqref{eq:john_problem_intro} for $f$ and $g_b$,\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactset{f}{g_b}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ satisfying \\eqref{eq:functional_glmp}.\n\t\t\\item\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp},\n\t\t\tthen $h=g_b$ is a global maximizer in Positive position John $s$-problem \\eqref{eq:john_problem_pos} for $f$ and $g_b$.\n\t\\end{enumerate}\n\\end{cor}\n\nIn \\Href{Section}{subsec:sufficient_conditions}, we will discuss conditions on $g_b$ which guarantee that the conditions of \\Href{Theorem}{thm:john_condition_general} on $f$ and $g_b$ hold with \\emph{any} proper log-concave function $f$.\n\n\n\\subsection{Strategy of the proof of \n\\texorpdfstring{\\Href{Theorem}{thm:john_condition_general}}{\nTheorem~\\ref{thm:john_condition_general}}}\n\n\\begin{dfn}\\label{dfn:contactop}\nFor any $(\\upthing{u},\\upthing{v})\\in\\Re^{d+1}\\times\\Re^{d+1}$, \nwe define the \\emph{John-type extended contact operator} by\n\\[\n\\contactopjohn{\\upthing{u}}{\\upthing{v}} = \\parenth{\\parenth{u \\otimes v} \\oplus \\mu\\nu,v}\\in \\mathcal{W},\n\\]\nwhere $\\upthing{u}=(u,\\mu)\\in\\Re^d\\oplus\\Re=\\Re^{d+1}$, and $\\upthing{v}=(v,\\nu)\\in\\Re^d\\oplus\\Re=\\Re^{d+1}$.\n\nFor two functions $f,g:\\Re^d\\to[0,\\infty)$, we denote the reduced set of John-type extended contact operators by\n \\begin{equation}\\label{eq:johncontactoperators}\n \\contactopsetjohn{f}{g}=\n \\big\\{\\contactopjohn{\\upthing{u}}{\\upthing{v}} \\colon\n (\\upthing{u},\\upthing{v})\\in \\contactset{f}{g} \\big\\}\\subset\\mathcal{W}.\n \\end{equation}\n\\end{dfn}\n\n\nWe break up the proof of \\Href{Theorem}{thm:john_condition_general} into several steps. \nFirst, we will use a geometric reformulation of the equations in \n\\eqref{eq:functional_glmp}. It is easy to see that those equations encode the \nfact that the point $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0} \\in \\mathcal{M}$ is in the positive \ncone of the set \n$\\contactopsetjohn{f}{g_b}$ in $\\mathcal{W}$. It then follows that if no set of \ncontact pairs satisfies \\eqref{eq:functional_glmp}, then there is a linear \nhyperplane separating the point $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0}$ and the set \n$\\contactopsetjohn{f}{g_b}$. \n \nWe then aim at turning this separation of $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0}$ and \nthe set $\\contactopsetjohn{f}{g_b}$ into a perturbation of the function $g$ \nwhich is of greater integral than $g$, and is still pointwise below $f$. \nHowever, to obtain this perturbation of $g$, we need strong separation, which poses an \nimportant difficulty. To solve it, we need to show that \n$\\contactopsetjohn{f}{g_b}$ is compact in $\\mathcal{W}$. To that end, we will show \nthat the boundedness of the set of contact pairs $\\contactset{f}{g_b}$ yields \nthe compactness of $\\contactopsetjohn{f}{g_b}$. 
Note that we will not \ninvestigate the boundedness of $\\contactset{f}{g_b}$ itself in the current \nsection, it will be addressed in \\Href{Section}{sec:boundedcontactpairs}.\n\nThus, to prove \\eqref{item:local-maximum-implies-glmp} of \\Href{Theorem}{thm:john_condition_general}, we first write strong separation analytically: if no contact pairs yield \\eqref{eq:functional_glmp}, then there is an $(H\\oplus\\gamma,h)\\in\\mathcal{W}$ (a normal to the separating hyperplane) such that\n\\begin{equation}\\label{eq:separation_explanatory_part}\n\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t{\\parenth{\\sid,0}} > 0\n\t\\quad\\text{and}\\quad\n\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t{A_J} < 0\n\\end{equation}\nfor all $A_J \\in \\contactopsetjohn{f}{g_b}$.\n\nTo obtain the needed perturbation of $g$, we construct a certain kind of \n``average'' of two functions below a given log-concave function $f$ so that the \nnew function remains below $f$. This will be a straight forward adjustment of averaging \ntwo positions of a set inside a given convex set. Using this averaging \nconstruction, we will construct a curve $\\Gamma_t, t \\in [0, \\tau]$ in $\\mathcal{M}$ \nstarting at $\\parenth{\\mathrm{Id}_{d+1},0}$ such that its directional vector at zero is precisely the normal vector $ \n\\parenth{H \\oplus \\gamma,h}$. Positions of $g$ correspond to the points of \n$\\Gamma_t$ in a natural way: $\\upalpha g\\!\\parenth{A^{-1}(x-a)}$ corresponds to \n$\\parenth{A \\oplus \\upalpha, a} \\in \\mathcal{M}$. That is, the position corresponding to \n$\\Gamma_0=\\parenth{\\mathrm{Id}_{d+1}, 0}$ is $g$ itself, and $\\Gamma_t, t \\in [0,\\tau]$ can be seen as a \nhomotopy of $\\lifting{g}$ for a sufficiently small positive $\\tau.$ \n\nThe next step is to see what properties of the curve $\\Gamma_t$, $t \\in [0, \\tau]$,\nshould possess in order to guarantee the inequalities in \n\\eqref{eq:separation_explanatory_part}. It turns out that the leftmost \ninequality in \\eqref{eq:separation_explanatory_part} essentially means that the \nintegral of a position of $g$ corresponding to a point $\\Gamma_t$ is greater \nthan the integral of $g$ itself for all sufficiently small $t.$ \n\\begin{remark}\nWe note that the comparison of integrals appears only at this step. So using our approach, one might maximize more sophisticated functionals than the $L_s$-norm of $g$. \n\\end{remark}\n\nFinally, we will show that the rightmost inequality in \\eqref{eq:separation_explanatory_part} essentially means that the position of $g$ corresponding to $\\Gamma_t$ remains below $f$ for all sufficiently small $t$.\n\nTo put everything together, in the proof of necessary condition \\eqref{item:local-maximum-implies-glmp}, we will assume that \nthe point $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0}$ is strongly separated from the set $\\contactopsetjohn{f}{g_b}$ in $\\mathcal{W}$. Then, we will construct a curve $\\Gamma_{t}$ in $\\mathcal{W}$ using the normal vector of the separating hyperplane, and after that, we will show that it defines a homotopy of $\\lifting{g}$ with the desired property. 
In the proof of sufficient condition \\eqref{item:glmp-implies-global-maximum}, we will assume that $g$ is not the global maximizer, and we will construct a curve $\\Gamma_t$ in $\\mathcal{W}$ using our averaging construction and then, we will show that the directional vector of $\\Gamma_t$ is the normal vector of a hyperplane that separates $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0}$ from $\\contactopsetjohn{f}{g_b}$ in $\\mathcal{W}$.\n\n\\subsection{Main components of the proof of \\texorpdfstring{\\Href{Theorem}{thm:john_condition_general}}{Theorem~\\ref{thm:john_condition_general}}}\nBy a routine compactness argument and by \\Href{Corollary}{cor:normalcone_nonempty}, on has the following. \n\\begin{lem}\n Let functions $f,g_b\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}. Assume $h=g$ is a local maximizer in John $s$-problem \\eqref{eq:john_problem_intro}. Then the sets $\\contactset{f}{g_b}$ and $\\contactpoint{f}{g_b}$ are non-empty.\n\\end{lem}\n\nLet us show that the boundedness of the set of contact pairs yields the compactness of the set of contact operators.\n\\begin{lem}[Compactness of the set of contact operators]\\label{lem:compact_contact_operator}\n Let functions $f,g_b\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}. Then $\\contactopsetjohn{f}{g_b}$ is a compact subset of $\\mathcal{W}$.\n\\end{lem}\n\n\\begin{proof}\n\n\nThe definition of $\\contactopsetjohn{f}{g_b}$ and the boundedness of\n$\\contactset{f}{g_b}$ imply that $\\contactopsetjohn{f}{g_b}$ is bounded.\n\n\nLet us show that $\\contactopsetjohn{f}{g_b}$ is closed. \nConsider a sequence $\\{(\\upthing{u}_i, \\upthing{v}_i)\\} \\subset \\contactset{f}{g_b}$, where $\\upthing{u}_i = (u_i, \\mu_i)$ and $\\upthing{v}_i = (v_i, \\nu_i),$ such that $\\contactopjohn{\\upthing{u}_i}{\\upthing{v}_i}$ is convergent.\nSince $\\contactset{f}{g_b}$ is bounded, we may assume by passing to a subsequence that \n$\\lim\\limits_{i \\to \\infty} \\upthing{u}_i = \\upthing{u} = (u, \\mu)$ and \n$\\lim\\limits_{i \\to \\infty} \\upthing{v}_i = \\upthing{v} = \\upthing{v} = (v, \\nu)$.\nBy upper semi-continuity, $(u, \\mu) \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f} \\cap \\mathrm{ess}\\ \\mathrm{graph}\\ {g_b}.$\n\n\nIf $\\mu > 0$, then the convex function $-\\ln f$ is finite in some open neighborhood of $u$ in its effective domain. Using \\Href{Lemma}{lem:nfcone} and then applying\n\\Href{Proposition}{prop:normalconesemicont} to the compact convex set \n$K = \\{(x, h) \\colon -\\ln f(x) \\leq h \\leq - \\ln f(u) +1 \\}$ yield\n\\[\n\\upthing{v} \\in \\nfcone{\\lifting{f}}{\\upthing{u}}.\n\\]\n\nNow, consider the case $\\mu = 0$.\nSince the set $\\{\\nu_i \\colon i\\in\\mathbb Z^+\\}$ is bounded and \n$\\lim\\limits_{i \\to \\infty} \\mu_i = 0$, we have\n\\[\n\\lim\\limits_{i \\to \\infty} \\mu_i \\nu_i = 0 = \\mu\\nu.\n\\]\nHence, $\\iprod{\\upthing{v}}{\\upthing{u}} = \\iprod{v}{u}.$ \nBy this and by \\Href{Lemma}{lem:nfcone_at_zero}, it suffices to show that $v \\in \\nfcone{\\supp f}{u} \\subset \\Re^d$. \n\nBy the assumptions of \\Href{Theorem}{thm:john_condition_general}, the origin is in the interior of $\\supp{f}$. Consider the sequence of compact convex sets \n$\\left[ f \\geq f(u_i) \\right] \\cap 2\\enorm{u}\\ball{d}.$ This sequence converges to $\\cl{\\supp{f}} \\cap (2\\enorm{u}\\ball{d})$. 
By \\eqref{eq:normalconelevelset},\n $v_i \\in \\nfcone{\\left[ f \\geq f(u_i) \\right] \\cap 2\\enorm{u}\\ball{d}}{u_i}.$ Thus, using \\Href{Lemma} {lem:nfconeregularity}, we conclude $\\lim\\limits_{i \\to \\infty} v_i= v$ \n belongs to $\\nfcone{\\supp f}{u}.$ \n Consequently, $\\contactopsetjohn{f}{g_b}$ is a closed bounded set of the finite-dimensional vector space $\\mathcal{W}$ and hence, it is compact. \n\\end{proof}\n\n\nNext, we reformulate equation \\eqref{eq:functional_glmp} in terms of separation of a closed convex set from a point in the finite dimensional real vector space $\\mathcal{W}$.\n\n\\begin{lem}[Separation of operators]\\label{lem:separation_John_problem}\n Let the functions $f,g_b \\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}. Then the following assertions are equivalent:\n\\begin{enumerate}\n\\item\\label{item:nocontacts}\nThere are no contact pairs of $f$ and $g_b$ and positive weights satisfying equation \\eqref{eq:functional_glmp}.\n\\item\\label{item:strictseparation}\nThere exists $ \\parenth{H \\oplus \\gamma, h}\\in\\mathcal{W}$ such that\n\t\t\\begin{equation}\\label{eq:john_s-concave-stict-separation}\n\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\parenth{\\sid,0}} > 0\n\t\t\t\\quad\\text{and}\\quad\n\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\contactopjohn{\\upthing{u}}{\\upthing{v}}} < 0\n\t\t\\end{equation}\n\t\tfor all $(\\upthing{u},\\upthing{v})\\in \\contactset{f}{g_b}$.\n\\item\\label{item:weakseparation}\nThere exists $ \\parenth{H \\oplus \\gamma, h}\\in\\mathcal{W}$ such that\n\t\t\\begin{equation}\\label{eq:john_s-concave-nonstrict-separation}\n\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\parenth{\\sid,0}} > 0\n\t\t\t\\quad\\text{and}\\quad\n\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\contactopjohn{\\upthing{u}}{\\upthing{v}}} \\leq 0\n\t\t\\end{equation}\n\t\tfor all $(\\upthing{u},\\upthing{v})\\in \\contactset{f}{g_b}$.\n\\end{enumerate}\n\\end{lem}\n\n\\begin{proof}[Proof of \\Href{Lemma}{lem:separation_John_problem}]\nFor any $(\\upthing{u},\\upthing{v})\\in\\contactset{f}{g_b}$, we have $\\iprod{\\upthing{u}}{\\upthing{v}}=1$ thus, $\\tr{\\parenth{u\\otimes v}}+f(u)\\nu=1$. Taking trace in the first equation in \\eqref{eq:functional_glmp} and adding it to the second equation therein yields that $\\sum_{i=1}^m c_i=d+s$. \nIt follows that assertion \\eqref{item:nocontacts} of the lemma is equivalent to the assertion that $\\frac{1}{d+s}\\parenth{\\sid,0}$ is not in the convex hull of\nthe set $\\contactopsetjohn{f}{g_b}$ of John-type extended contact operators defined by \\eqref{eq:johncontactoperators}.\nBy \\Href{Lemma}{lem:compact_contact_operator}, $\\contactopsetjohn{f}{g_b}$ is compact. 
\nNote that both the set $\\contactopsetjohn{f}{g_b}$ and the point $\\frac{1}{d+s}\\parenth{\\sid,0}$ belong to the affine hyperplane \n$\n\\left\\{\n(\\mathcal{A},a) \\in \\mathcal{W} \\colon \\tr{\\mathcal{A}} = 1\n\\right\\}.\n$\nThus, the statements in \\Href{Lemma}{lem:separation_John_problem} are reduced to the separation of a point from a compact subset $\\contactopsetjohn{f}{g_b}$ by a linear hyperplane in the finite-dimensional real vector space $\\mathcal{W}$, a basic notion in convexity.\n\\end{proof}\n\nSecond, the following lemma allows us to interpolate between two functions below a given one in such a way that the new function remains below the given one.\n\\begin{lem}[Inner interpolation of functions]\\label{lem:inner-function-interpolation}\n\tLet $f \\colon \\Re^d \\to [0, +\\infty)$ be a log-concave function and $g \\colon \\Re^d \\to [0, +\\infty)$ be a function.\n\tLet $\\upalpha_1,\\upalpha_2>0$, $A_1, A_2$ be non-singular matrices of order $d,$\n\tand $a_1, a_2\\in\\Re^d$\n\tbe such that\n\t\t\\[\n\t\t\t\\upalpha_1 g(A_1^{-1} (x - a_1)) \\leq f(x)\n\t\t\t\\quad \\text{and}\\quad\n\t\t\t\\upalpha_2 g(A_2^{-1} (x - a_2)) \\leq f(x).\n\t\t\\]\n\tfor all $x\\in\\Re^d$.\n\tLet $\\beta_1,\\beta_2>0$ be such that $\\beta_1+\\beta_2=1$.\n\tDefine\n\t\t\\[\n\t\t\t\\upalpha = \\upalpha_1^{\\beta_1} \\upalpha_2^{\\beta_2},\n\t\t\t\\quad\n\t\t\tA = \\beta_1 A_1 + \\beta_2 A_2,\n\t\t\t\\quad\\text{and}\\quad\n\t\t\ta = \\beta_1 a_1 + \\beta_2 a_2.\n\t\t\\]\n\tAssume that $A$ is non-singular.\t\n\tThen\n\t\t\\begin{equation}\\label{eq:inner-function-interpolation-1}\n\t\t\t\\upalpha g(A^{-1} (x - a)) \\leq f(x).\n\t\t\\end{equation}\n\tIf $A_1$ and $A_2$ are positive definite, and $g$ is integrable, then we also have\n\t\t\\begin{equation}\\label{eq:inner-function-interpolation-2}\n\t\t\t\\int_{\\Re^d} \\upalpha g(A^{-1} (x - a)) \\di x \\geq\n\t\t\t\t\\left(\\int_{\\Re^d}\\upalpha_1 g(A_1^{-1} (x - a_1) ) \\di x\\right)^{\\beta_1}\n\t\t\t\t\\left(\\int_{\\Re^d}\\upalpha_2 g(A_2^{-1} (x - a_2) ) \\di x\\right)^{\\beta_2}\n\t\t\\end{equation}\nwith equality if and only if $A_1 = A_2.$\n\\end{lem}\n\\begin{proof}\nFix $x \\in \\Re^d$ and define\n\\[\nx_1 = A_1 A^{-1} x, \\quad x_2 = A_2 A^{-1} x. \n\\]\nBy assumption of the lemma, \n\\begin{equation}\\label{eq:inner_interp_height}\nf(x_1 + a_1) \\ge \\upalpha_1 g \\! \\parenth{A^{-1}x}\n\\quad \\text{and} \\quad \nf(x_2 + a_2) \\ge \\upalpha_2 g \\! \\parenth{A^{-1}x}.\n\\end{equation}\nBy our definitions, $ \\beta_1 (x_1 + a_1) + \\beta_2(x_2 + a_2) = x + a.$\nTherefore, by the log-concavity of $f$,\n\\[\nf(x + a) \\ge f^{\\beta_1}( x_1 + a_1) f^{\\beta_2}( x_2 + a_2),\n \\]\nwhich, by \\eqref{eq:inner_interp_height}, yields\n\\[\n{f(x + a)} \\ge \n \\upalpha g \\! \\parenth{A^{-1}x}.\n\\]\nInequality \\eqref{eq:inner-function-interpolation-1} follows.\nInequality \\eqref{eq:inner-function-interpolation-2} immediately follows from Minkowski's determinant inequality \\eqref{eq:minkowski_det_multipl_ineq}.\n\\end{proof}\n\nNext, we observe that the leftmost inequalities in \\eqref{eq:john_s-concave-stict-separation} and \\eqref{eq:john_s-concave-nonstrict-separation} compare the integrals of $g$ and a perturbation of $g$ defined by\n \\begin{equation}\\label{eq:perturbationdef}\ng_t (x) = \\upalpha_t g \\! \\parenth{A^{-1}_t(x - a_t)}, \\text{ for } x\\in\\Re^d. 
\n \\end{equation}\n\n\n\\begin{lem}[Integral of a perturbation of $g$]\\label{lem:john_separation_integral}\nFix $s > 0$, and let $g \\colon \\Re^d \\to [0, +\\infty)$ be a function such that $g^s$ is of finite positive integral.\n\t Let $\\Gamma(t) = (A_t \\oplus \\upalpha_t, a_t),$ $ t \\in [0,1]$ be a curve in $\\mathcal{M}$ with $(A_0 \\oplus \\upalpha_0, a_0)=(\\mathrm{Id}_{d+1},0)$, and assume that the right derivative of $\\Gamma$ at $t=0$ is of the form $(H\\oplus\\gamma,h)$. Define the perturbation ${g}_t$ of $g$ by \\eqref{eq:perturbationdef}. Consider the following statements:\n \\begin{enumerate}[(a)]\n \\item\\label{item:int_admissible_linearized_in_primal} \n\\[\t\t\t\n \t\\iprod{\\parenth{H \\oplus \\gamma,h}}{\\parenth{\\sid,0}} > 0;\n\\] \n \\item\\label{item:int_gammaadmissible} \t\n \t\t\\[\n\t\t\t\t\\int {g}_t^s>\\int g^s \n\t\t\t\\]\n\t\tfor all $t \\in (0, \\tau]$ and some $\\tau > 0;$\t\n \\item\\label{item:int_admissible_linearized_in_primalweak} \n\\[\t\t\n \t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\parenth{\\sid,0}} \\geq 0.\n\\] \n \\end{enumerate}\n Then \\eqref{item:int_admissible_linearized_in_primal} implies \\eqref{item:int_gammaadmissible}, and \\eqref{item:int_gammaadmissible} implies \\eqref{item:int_admissible_linearized_in_primalweak}.\n\\end{lem}\n\\begin{proof}\nOne has\n\\[\n\t\\int_{\\Re^d} {g}_t^s = \n\t \\upalpha_t^{s} \\det A_t \\int_{\\Re^d} g^s = \n\t\\parenth{1 + t \\gamma + \\littleo{t}}^{s} \\parenth{1 + t\\tr{H} + \\littleo{t} } \\int_{\\Re^d} g^s=\n\\]\n\\[\n=\\parenth{1 + t \\parenth{s \\gamma + \\tr{H}} + \\littleo{t} } \\int_{\\Re^d} g^s\n=\\parenth{1 + t \\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\parenth{\\sid,0}} + \\littleo{t} } \\int_{\\Re^d} g^s,\n\\]\nand the statement follows.\n\\end{proof}\n\n\nThe following lemma is an exercise in compactness.\n\\begin{lem}[Homotopy of a compact set]\\label{lem:most_general_homotopy_inclusion}\nLet $F$ be a closed set in $\\Re^d$ and $K \\subset F$ be a non-empty compact set.\nLet $\\mathcal{H} \\colon K \\times [0, \\tau] \\to \\Re^d$ be a homotopy between $K$ and some set $K_\\tau$ such that for every $u \\in \\partial F \\cap K$ and some positive $\\varepsilon_u,$ \nthe curve $\\mathcal{H} (u, t), t \\in [0, \\varepsilon_u]$ belongs to $F.$ Then there is a positive $\\varepsilon$ such that\n$\\mathcal{H} (K, t) \\subset F$ for all $t \\in [0, \\varepsilon].$ \n\\end{lem}\n\\begin{proof}\nFix any $u \\in K$, and consider the continuous function\n$t\\mapsto\\operatorname{dist}(\\mathcal{H} (u, t), F)$. It is zero at $t=0$. Clearly, if $u$ belongs to the interior of $F$, then this function is zero on a proper interval $t \\in [0, \\varepsilon_u]$. By the assumption of the lemma, the same holds if $u \\in \\partial F \\cap K$. 
\nThe compactness of $K$ yields the assertion of the lemma.\n\\end{proof}\n\n\n\nFinally, we show that the rightmost inequalities in \\eqref{eq:john_s-concave-stict-separation} and \\eqref{eq:john_s-concave-nonstrict-separation} encode that a certain perturbation of $g$ is pointwise below $f$.\n\n\\begin{thm}[Characterization of admissible perturbations]\\label{thm:admissiblelinearized_john}\n Let the functions $f,g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}, and let $\\Gamma(t) = (A_t \\oplus \\upalpha_t, a_t), t \\in [0,1]$ be a curve in $\\mathcal{M}$ with $(A_0 \\oplus \\upalpha_0, a_0)=(\\mathrm{Id}_{d+1},0)$, and assume that the right derivative of $\\Gamma$ at $t=0$ is of the form $(H\\oplus\\gamma,h)$. Define the perturbation ${g}_t$ of ${g}$ by \\eqref{eq:perturbationdef}.\n Consider the following statements:\n \\begin{enumerate}[(a)]\n \\item\\label{item:admissible_linearized_in_primal}\n$\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\contactopjohn{\\upthing{u}}{\\upthing{v}}} < 0\n$\n\t\tfor all $(\\upthing{u},\\upthing{v})\\in \\contactset{f}{g_b}$.\n \\item\\label{item:gammaadmissible} \n There is $\\varepsilon > 0$ such that ${g}_t \\leq {f}$ for all $t\\in[0,\\varepsilon]$.\n \\item\\label{item:admissible_linearized_in_primalweak}\n$\t\t\t\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\contactopjohn{\\upthing{u}}{\\upthing{v}}} \\leq 0\n$\n\t\tfor all $(\\upthing{u},\\upthing{v})\\in \\contactset{f}{g_b}$.\n \\end{enumerate}\nThen \\eqref{item:admissible_linearized_in_primal} implies \\eqref{item:gammaadmissible}, and \\eqref{item:gammaadmissible} implies \\eqref{item:admissible_linearized_in_primalweak}.\n\\end{thm}\n\\begin{proof}\nBy the log-concavity of $f$, a position of $g$ is below $f$ if and only if the corresponding position of $g_b$ is below $f.$ Hence, it suffices to consider the perturbation of $g_b$ given by \n\\[\n\\tilde{g}_t (x) = \\upalpha_t g_b \\! \\parenth{A^{-1}_t(x - a_t)}, \\text{ for } x\\in\\Re^d.\n\\]\n\nSince our sets are symmetric about $\\Re^d$, it suffices to consider the sets $\\mathrm{ess}\\ \\mathrm{graph}\\ {\\tilde{g}_t}$. \nDefine the homothopy\n$\\mathcal{H} \\colon$ $\\parenth{\\mathrm{ess}\\ \\mathrm{graph}\\ {g_b}} \\times [0, 1] \\to {\\mathbb R}^{d+1}$ by\n\\[\n\\mathcal{H}(\\upthing{y},t)=\\parenth{A_t y + a_t,\\ \\upalpha_t g_b(y)}\n\\]\nfor all $y \\in \\cl{\\supp g_b}$ and $\\upthing{y} = (y, g_b(y)).$\nThat is,\n\\[\n\\mathcal{H}\\! \\parenth{\\mathrm{ess}\\ \\mathrm{graph}\\ {g_b},t} = \\mathrm{ess}\\ \\mathrm{graph}\\ {\\tilde{g}_t}.\n\\]\n\n\n\nConsider an arbitrary $u \\in \\cl{\\supp g_b}$ and \n$\\upthing{v}=(v,\\nu)\\in\\Re^d \\times \\Re$. Set $\\upthing{u} = (u, g_b(u))$.\nOne has\n\\[\n \\mathcal{H}^\\prime:=\\derivativeatzero\\mathcal{H}(\\upthing{u}, t)=\\parenth{ Hu + h,\\ \\gamma g_b(u)},\n\\]\nand thus, by \\Href{Definition}{dfn:contactop},\n\\begin{equation}\\label{eq:iprodvhiprodtensor}\n\\iprod{\\upthing{v}}{\\mathcal{H}^\\prime}=\\iprod{v}{Hu + h} + \n\\gamma \\nu g_b(u)= \n\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactopjohn{\\upthing{u}}{\\upthing{v}}}.\n\\end{equation}\n\nConsider the case $g_b(u)>0$.\nWe recall the assumption of \\Href{Theorem}{thm:john_condition_general} according to which \n$\\contactpoint{f}{g_b}$ is a star-like set with respect to~ $f$. 
\nUsing \\Href{Lemma}{lem:locstar_geom_meaning} in identity \\eqref{eq:iprodvhiprodtensor}, \nwe see that for a fixed $\\upthing{u} \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f}$ with non-zero last coordinate, assertion \\eqref{item:admissible_linearized_in_primal} of the theorem is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\operatorname{int} \\parenth{\\nfcone{\\lifting{f}}{\\upthing{u}}}^{\\circ}.\n\\]\nSimilarly, assertion \\eqref{item:admissible_linearized_in_primalweak} of \\Href{Theorem}{thm:admissiblelinearized_john} is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\parenth{\\nfcone{\\lifting{f}}{\\upthing{u}}}^{\\circ}.\n\\]\n\nConsider the case $g_b(u)=0$.\nSince $\\supp \\logconc{g}$ is bounded and contains the origin in its interior by the assumption of \\Href{Theorem}{thm:john_condition_general}, the origin is contained in the interior of $\\supp f.$ This ensures that at every contact point $\\upthing{u}\\in \\lifting{g_b}\\cap\\partial{\\lifting{f}}$ with $f(u) = 0$, every outer normal direction to \n$\\supp{f}$ has an acute angle with $\\upthing{u} = (u,0) \\in \\Re^d$, and hence, it can be represented by a vector $\\upthing{v}= (v,0)\\in {\\mathbb R}^d$ such that $\\iprod{\\upthing{u}}{\\upthing{v}}=1$, which yields $(\\upthing{u},\\upthing{v})\\in\\contactset{\\lifting{g_b}}{f}$.\n\nIt follows from identity \\eqref{eq:iprodvhiprodtensor} that for a fixed $\\upthing{u}$ whose last coordinate is zero, assertion \\eqref{item:admissible_linearized_in_primal} of the theorem is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\operatorname{int} \\parenth{\\nfcone{\\supp{f}}{\\upthing{u}}}^{\\circ},\n\\]\nwhere all the sets and the polarity are meant in $\\Re^d.$\nSimilarly, assertion \\eqref{item:admissible_linearized_in_primalweak} of \\Href{Theorem}{thm:admissiblelinearized_john} is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\parenth{\\nfcone{\\supp{f}}{\\upthing{u}}}^{\\circ}.\n\\]\n\nConsequently, the implication \\eqref{item:curve_inclusion} $\\Rightarrow$ \\eqref{item:curve_linearized_in_primalweak} of \\Href{Lemma}{lem:curve_in_the_lifting} and the implication \\eqref{item:flat_curve_inclusion} $\\Rightarrow$ \\eqref{item:flat_curve_linearized_in_primalweak} of \\Href{Lemma}{lem:flat_curve_in_the_lifting} yield \\eqref{item:gammaadmissible} $\\Rightarrow$ \\eqref{item:admissible_linearized_in_primalweak} in the theorem.\n\n\n To obtain the implication \\eqref{item:admissible_linearized_in_primal} $\\Rightarrow$ \\eqref{item:gammaadmissible} of \\Href{Theorem}{thm:admissiblelinearized_john}, we will use \\Href{Lemma}{lem:most_general_homotopy_inclusion} with the roles $K=\\mathrm{ess}\\ \\mathrm{graph}\\ {g_b}$ and $F=\\lifting{f}.$ We need to check that the curve\n $\\xi(t) = \\mathcal{H}\\parenth{\\upthing{u},t}$ remains in $\\lifting{f}$ for \n an arbitrary fixed $\\upthing{u} \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f} \\cap \\mathrm{ess}\\ \\mathrm{graph}\\ {g_b}$ and all sufficiently small $t.$ \nThe implication \\eqref{item:curve_linearized_in_primal} $\\Rightarrow$ \\eqref{item:curve_inclusion} of \\Href{Lemma}{lem:curve_in_the_lifting} \nand the implication \\eqref{item:flat_curve_linearized_in_primal} $\\Rightarrow$ \\eqref{item:flat_curve_inclusion} of \\Href{Lemma}{lem:flat_curve_in_the_lifting} yield this property of the curve $\\xi(t)$ in the corresponding cases. 
\nThis completes the proof of \\Href{Theorem}{thm:admissiblelinearized_john}.\n\\end{proof}\n\\begin{remark}\n\\Href{Theorem}{thm:admissiblelinearized_john}.\nis the only place in the proof of \\Href{Theorem}{thm:john_condition_general} at which we \nuse the assumption that $\\contactpoint{f}{g_b}$ is a star-like set with respect to~$f$. \n\\end{remark}\n\\subsection{Proof of Theorem~\\ref{thm:john_condition_general}}\nWe start with assertion \\eqref{item:local-maximum-implies-glmp} of the theorem.\nAssume that there are no contact pairs and positive weights satisfying equation~\\eqref{eq:functional_glmp}.\nThen, by \\Href{Lemma}{lem:separation_John_problem}, there exists $ \\parenth{H \\oplus \\gamma, h} \\in \\mathcal{W}$ such that\n\\begin{equation}\\label{eq:john_s-concave-separation}\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\parenth{\\sid,0}} > 0\n \\quad\\text{and}\\quad\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\contactopjohn{\\upthing{u}}{\\upthing{v}}} < 0\n\\end{equation}\nfor all $\\parenth{\\upthing{u},\\upthing{v}} \\in \\contactset{f}{g_b}$.\n\n\nThe matrix \n\\(\n\\mathrm{Id}_{d} + t {H}\n\\)\n is non-singular for all $t \\in [0, \\tau]$ for some positive $\\tau$. \nWe use $\\parenth{H \\oplus \\gamma,h}$ to obtain a perturbation of $g$ within the \nclass $\\funpos{g}$ which is pointwise below $f$ but is of larger integral than \n$g$. Set\n\\[\n g_t(x)=(1+ t\\gamma) g\\! \\parenth{\\parenth{\\mathrm{Id}_d + t H}^{-1}(x-th)}.\n\\]\nClearly, $g_t \\in \\funpos{g}$ \nfor all $t \\in [0, \\tau]$. \nBy implication \\eqref{item:int_admissible_linearized_in_primal} $\\Rightarrow$ \\eqref{item:int_gammaadmissible} of \\Href{Lemma}{lem:john_separation_integral}, \n \\[\n \\int g_t^s > \\int g^s\n \\]\nfor all $t \\in (0, \\tau]$ with some $\\tau > 0.$\nUsing the implication \\eqref{item:admissible_linearized_in_primal} $\\Rightarrow$ \\eqref{item:gammaadmissible} of \\Href{Theorem}{thm:admissiblelinearized_john},\none obtains that $g_t \\leq f$ for all sufficiently small $t$. \nThus, $g$ is not a local maximizer in the John $s$-problem \\eqref{eq:john_problem_intro}, completing the proof of assertion~(\\ref{item:local-maximum-implies-glmp}) of \\Href{Theorem}{thm:john_condition_general}.\n\nWe proceed with assertion~(\\ref{item:glmp-implies-global-maximum}) of the theorem.\nAssume that $h=g$ is not a global maximizer in Positive position John $s$-problem \\eqref{eq:john_problem_pos}.\nThat is, there exist a positive definite matrix $A$, $\\gamma \\in {\\mathbb R}$ and $h \\in \\Re^d$ such that \nthe function $g_1$ defined by\n\\[\ng_1(x) = e^{\\gamma} g\\parenth{A^{-1}(x-h)}, \\text{ for } x\\in\\Re^d\n\\]\nsatisfies $g_1 \\leq f$ and $\\int_{\\Re^d} g_1^s > \\int_{\\Re^d} g^s$. We may assume a bit more by applying a slight contraction on \n$\\lifting{g_1}$ (which is a transformation within $\\funppos{g}$): \n$\\lifting{g_1}\\subset\\operatorname{int}\\parenth{\\lifting{f}}$. 
We will use $g_1$ to define a perturbation $g_t$ of $g$ (a curve in $\\mathcal{M}$) that is below $f$ with larger integral, and then, taking the derivative of that perturbation at the starting point $t=0$, we will obtain $\\parenth{H\\oplus\\gamma,h}\\in\\mathcal{W}$ that separates $\\parenth{\\sid, 0}$ from the set $\\contactopsetjohn{f}{g_b}$, which by \\Href{Lemma}{lem:separation_John_problem} will yield the assertion.\n\nSet $H = A - \\mathrm{Id}_d$.\nWe begin by showing that \n\\begin{equation}\\label{eq:john_suff_integral_separation}\n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} > 0.\n\\end{equation}\n\nIndeed, since $\\lifting{g_1}$ is a compact subset of $\\operatorname{int}\\parenth{\\lifting{f}}$, there is a $\\delta>0$ such that $A - \\delta \\mathrm{Id}_d$ is positive definite and the function $\\tilde{g}_1$ defined by \n\\[\n\\tilde g_1(x) = e^{\\gamma} g\\parenth{\\parenth{A - \\delta \\mathrm{Id}_d}^{-1}(x-h)},\n\\]\nsatisfies the relations $\\tilde{g}_1\\leq f$ and $\\int_{\\Re^d} g^s < \\int_{\\Re^d} \\tilde{g}_1^s$.\n\nFor $t \\in [0,1]$, define\n\\[\n\\tilde{g}_t (x) = e^{t\\gamma} g\\parenth{\\parenth{\\mathrm{Id}_d + t\\parenth{H - \\delta \\mathrm{Id}_d}}^{-1}(x - th)} \\in \n\\funppos{g}.\n\\] \t\t\nBy \\Href{Lemma}{lem:inner-function-interpolation}, \n$\\tilde{g}_t \\leq f$ for all $t \\in [0,1],$ \tand\n\\[\n\\int_{\\Re^d} {\\tilde{g}_t^s} \\geq\n \\parenth{ \\int_{\\Re^d} {g}^s}^{1-t}\n \\parenth{\\int_{\\Re^d} \\tilde{g}_1^s}^{t}\n \\geq \\int_{\\Re^d} {g^s}.\n\\]\nUsing implication \\eqref{item:int_gammaadmissible} $\\Rightarrow$ \\eqref{item:int_admissible_linearized_in_primalweak} of \\Href{Lemma}{lem:john_separation_integral}, we have\n\\[\n 0 \\leq \\iprod{\\parenth{\\parenth{H - \\delta \\mathrm{Id}_d} \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} = \n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} - \\delta d, \n\\]\nyielding\n\\begin{equation*}\n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} \\geq \\delta d > 0,\n\\end{equation*}\nand \\eqref{eq:john_suff_integral_separation} follows.\n\nFor $t \\in [0,1)$, define \n\\[\n{g}_t (x) = e^{t\\gamma} g\\parenth{\\parenth{\\mathrm{Id}_d + t H}^{-1}(x - th)}.\n\\] \t\t\nClearly, ${g}_t \\in \\funppos{g}$ for all $t \\in [0,1)$. \nBy the choice of $g_1$ and by \\Href{Lemma}{lem:inner-function-interpolation}, one sees that ${g}_t \\leq f$ for all $t \\in [0,1].$\nThe implication \\eqref{item:gammaadmissible} $\\Rightarrow$ \\eqref{item:admissible_linearized_in_primalweak} in \\Href{Theorem}{thm:admissiblelinearized_john} implies that\n$\\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\contactopjohn{\\upthing{u}}{\\upthing{v}}}$ $\\leq 0$\n\t\tfor all $\\parenth{\\upthing{u},\\upthing{v}} \\in \\contactset{f}{g_b}$.\nCombining this with inequality \\eqref{eq:john_suff_integral_separation},\n\\Href{Lemma}{lem:separation_John_problem} yields assertion \\eqref{item:glmp-implies-global-maximum} of \\Href{Theorem}{thm:john_condition_general}, whose proof is thus complete.\n\n\n\n\n\n\n\n\n\n\n\n\\section{L\\\"owner problem}\\label{sec:lowner}\nFix $s > 0$, and two functions $f, g \\colon \\Re^d \\to [0, \\infty)$. \n\nIn addition to L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro},\nwe will consider the following optimization problem. 
\n\\medskip\n\n\\textbf{Positive position L\\\"owner $s$-problem for $f$ and $g$:} Find\n\\begin{equation}\n\\label{eq:lowner_problem_pos}\n\\min\\limits_{h \\in \\funppos{g} }\n\t\\int_{\\Re^d} h^s \n\t\\quad \\text{subject to} \\quad\n\tf \\leq h.\n\\end{equation}\n\nWe define local and global minimizers to these problems in the same way as maximizers to the two John $s$-problems introduced in \\Href{Section}{sec:john}.\n\n\nThe next theorem, our main and most general result concerning the L\\\"owner $s$-problem, provides a condition of optimality in terms of the polars of the functions -- for the definition, see \\Href{Section}{sec:polarity}.\n\n\\begin{thm}[L\\\"owner's condition]\\label{thm:lowner_condition_general}\nFix $s>0.$ Let $f\\colon\\Re^d\\to[0,+\\infty)$ be a proper function,\nand let $g \\colon \\Re^d \\to [0, \\infty)$ be a proper log-concave function with $f \\leq g.$\nAssume that \n \\begin{itemize}\n\\item $\\loglego{g}$ satisfies our Basic Assumptions (see page~\\pageref{assumptions:basic}); \n\\item the set of contact points $\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$;\n\\item the set $\\contactset{\\loglego{f}}{\\loglego{g}}$ is bounded. \n\\end{itemize} \nThen the following hold.\n\t\\begin{enumerate}\n\t\t\\item\\label{item:lowner-local-minimum-implies-glmp}\n\t\t\tIf $h=g$ is a local minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro} for $f$ and $g$,\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\upthing{u}_1 ,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m ,\\upthing{v}_m) \\in \\contactset{\\loglego{f}}{\\loglego{g}}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ such that\n\t\t\\begin{equation}\\label{eq:functional_glmp-lowner}\n\t\t\\sum_{i=1}^{m} c_i {v}_i \\otimes {u}_i = \n\t\t \\mathrm{Id}_{d}, \\quad \n\t\t \\sum_{i=1}^{m} c_i \\loglego{g}(u_i ) \\cdot \\nu_i = s\n\t\t\t\t\t\\quad\\text{and}\\quad\n\t\t\t\t\t\\sum_{i=1}^{m} c_i\\loglego{g}(u_i ) \\cdot \\nu_i u_i =0,\n \\end{equation} \n where $\\upthing{u}_i=(u_i, \\loglego{g}(u_i ))$ and \n $\\upthing{v}_i=(v_i,\\nu_i) \\in \\nfcone{\\lifting{\\loglego{g}}}{\\upthing{u}_i}$.\n\t\t\\item\\label{item:glmp-lowner-implies-global-minimum}\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp-lowner},\n\t\t\tthen $h=g$ is a global minimizer in Positive position L\\\"owner $s$-problem \\eqref{eq:lowner_problem_pos} for $f$ and $g$.\n\t\\end{enumerate}\n\\end{thm}\n\n\n\n\nThe proof is similar to that of \\Href{Theorem}{thm:john_condition_general} with an essential additional idea. Instead of studying the inequality $f \\leq g_t$ for a perturbation $g_t$ of $g$, we will study the equivalent inequality $\\loglego{f} \\geq \\loglego{g_t}$. 
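\nAs a simple illustration of this reversal (not used in the sequel), recall from the proof of \\Href{Lemma}{lem:polar_func_composed_affine} below that\n\\[\n\\loglego{f}(y) = \\inf\\limits_{x \\in \\supp f} \\frac{e^{-\\iprod{x}{y}}}{f(x)}.\n\\]\nFrom this formula, it is immediate that $f \\leq g_t$ implies $\\loglego{g_t} \\leq \\loglego{f}$; the reverse implication relies on the properties of the polarity recalled in \\Href{Section}{sec:polarity}. For instance, the standard Gaussian profile $g(x) = e^{-\\enorm{x}^2/2}$ satisfies\n\\[\n\\loglego{g}(y) = \\inf\\limits_{x \\in \\Re^d} e^{\\frac{\\enorm{x}^2}{2} - \\iprod{x}{y}} = e^{-\\frac{\\enorm{y}^2}{2}} = g(y),\n\\]\nthat is, $g$ is its own polar.\n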
\n\n\\subsection{Main components of the proof of \\texorpdfstring{\\Href{Theorem}{thm:lowner_condition_general}}{Theorem~\\ref{thm:lowner_condition_general}}}\nFirst, the following lemma, the dual to \n\\Href{Lemma}{lem:inner-function-interpolation}, allows us to interpolate between \ntwo functions above a given one in such a way that the new function remains \nabove the given one.\n\n\\begin{lem}[Outer interpolation of functions]\\label{lem:outer-interpolation}\n\tLet $f \\colon \\Re^d \\to [0, +\\infty)$ be a function and $g \\colon \\Re^d \\to [0, +\\infty)$ be a log-concave function.\n\tLet $\\upalpha_1,\\upalpha_2>0$, $A_1, A_2$ be non-singular matrices of order $d,$\n\tand $a_1, a_2\\in\\Re^d$\n\tbe such that\n\t\t\\[\n\t\t\tf(x) \\leq \\upalpha_1 g(A_1x +a_1)\n\t\t\t\\quad \\text{and}\\quad\n\t\t\tf(x) \\leq \\upalpha_2 g(A_2x +a_2)\n\t\t\\]\n\tfor all $x\\in\\Re^d$.\n\tLet $\\beta_1,\\beta_2>0$ be such that $\\beta_1+\\beta_2=1$.\n\tDefine\n\t\t\\[\n\t\t\t\\upalpha = \\upalpha_1^{\\beta_1} \\upalpha_2^{\\beta_2},\n\t\t\t\\quad\n\t\t\tA = \\beta_1 A_1 + \\beta_2 A_2,\n\t\t\t\\quad\\text{and}\\quad\n\t\t\ta = \\beta_1 a_1 + \\beta_2 a_2.\n\t\t\\]\n\tAssume that $A$ is non-singular.\t\n\tThen\n\t\t\\begin{equation}\\label{eq:outer-interpolation-1}\n\t\t\tf(x) \\leq \\upalpha g(Ax + a).\n\t\t\\end{equation}\n\tIf $A_1$ and $A_2$ are positive definite and $g$ is integrable, then also\n\t\t\\begin{equation}\\label{eq:outer-interpolation-2}\n\t\t\t\\int_{\\Re^d} \\upalpha g(Ax+a) \\di x \\leq\n\t\t\t\t\\left(\\int_{\\Re^d} \\upalpha_1 g(A_1x + a_1) \\di x\\right)^{\\beta_1}\n\t\t\t\t\\left(\\int_{\\Re^d} \\upalpha_2 g(A_2x + a_2) \\di x\\right)^{\\beta_2}\n\t\t\\end{equation}\nwith equality if and only if $A_1 = A_2.$\n\\end{lem}\n\\begin{proof}\nBy the assumption of the lemma,\n\\[\nf(x) = f^{\\beta_1}(x) \\cdot f^{\\beta_2}(x) \\leq \n\\upalpha_1^{\\beta_1} g^{\\beta_1}(A_1 x + a_1) \\cdot \n\\upalpha_2^{\\beta_2} g^{\\beta_2}(A_2 x + a_2) = \n\\upalpha g^{\\beta_1}(A_1 x + a_1) \\cdot \n g^{\\beta_2}(A_2 x + a_2)\n\\]\nBy our definitions, $ \\beta_1 (A_1 x + a_1) + \\beta_2(A_2 x + a_2) = Ax + a.$\nThus, the log-concavity of $g$ yields inequality \\eqref{eq:outer-interpolation-1}.\n\nInequality \\eqref{eq:outer-interpolation-2} immediately follows from Minkowski's determinant inequality \\eqref{eq:minkowski_det_multipl_ineq} and its equality condition.\n\\end{proof}\n\n\n\\begin{dfn}\\label{dfn:contactoplowner}\nFor any $(\\upthing{u},\\upthing{v})\\in\\Re^{d+1}\\times\\Re^{d+1}$, \nwe define the \\emph{L\\\"owner-type extended contact operator} by\n\\[\n\\contactoplowner{\\upthing{u}}{\\upthing{v}} = \\parenth{\\parenth{v \\otimes u} \\oplus \\mu\\nu, \\mu\\nu u}\\in \\mathcal{W},\n\\]\nwhere $\\upthing{u}=(u,\\mu)\\in\\Re^{d+1}$, and $\\upthing{v}=(v,\\nu)\\in\\Re^{d+1}$.\n\nFor two functions $f,g:\\Re^d\\to[0,\\infty)$, we denote the set of L\\\"owner-type extended contact operators by\n \\begin{equation}\\label{eq:lownercontactoperators}\n \\contactopsetlowner{f}{g}=\n \\big\\{\\contactoplowner{\\upthing{u}}{\\upthing{v}} \\colon\n (\\upthing{u},\\upthing{v})\\in \\contactset{f}{g} \\big\\}\\subset\\mathcal{W}.\n \\end{equation}\n\\end{dfn}\n\n\n\n\n\n\\begin{lem}[Compactness of the set of contact operators]\\label{lem:compact_contact_operatorLowner}\n Let functions $f,g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}. 
Then \n $\\contactopsetlowner{\\loglego{f}}{\\loglego{g}}$ is a compact subset of $\\mathcal{W}$.\n\\end{lem}\nWe omit the proof as it is essentially the same as the proof of \\Href{Lemma}{lem:compact_contact_operator}.\n\n\n\nNext, we reformulate equation \\eqref{eq:functional_glmp-lowner} in terms of separation of a closed convex set from a point in the finite-dimensional real vector space $\\mathcal{W}$. The proof is identical to the proof of \\Href{Lemma}{lem:separation_John_problem}, we omit it.\n\n\\begin{lem}[Separation of operators]\\label{lem:separation_Lowner_problem}\nFor any two upper semi-continuous functions $f,g\\colon\\Re^d\\to[0,+\\infty)$, the following assertions are equivalent:\n\\begin{enumerate}\n\\item There are no contact pairs of $f$ and $g$ and positive weights satisfying equation \\eqref{eq:functional_glmp-lowner}.\n\\item There exists $ \\parenth{H \\oplus \\gamma, h}\\in\\mathcal{W}$ such that\n\\begin{equation}\\label{eq:lowner-stict-separation}\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\parenth{\\sid,0}} > 0\n \\quad\\text{and}\\quad\n\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactoplowner{\\upthing{u} }{\\upthing{v}}}\n< 0\n\\end{equation}\nfor all $\\parenth{\\upthing{u}, \\upthing{v}} \\in \\contactset{\\loglego{f}}{\\loglego{g}}$.\n\\item There exists $ \\parenth{H \\oplus \\gamma, h}\\in\\mathcal{W}$ such that\n\\begin{equation}\\label{eq:lowner-nonstrict-separation}\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\parenth{\\sid,0}} > 0\n \\quad\\text{and}\\quad\n\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactoplowner{\\upthing{u} }{\\upthing{v}}}\n \\leq 0\n\\end{equation}\nfor all $\\parenth{\\upthing{u}, \\upthing{v}} \\in \\contactset{\\loglego{f}}{\\loglego{g}}$.\n\\end{enumerate}\n\\end{lem}\t\n\n\nNext, we observe that the leftmost inequalities in \\eqref{eq:lowner-stict-separation} and \\eqref{eq:lowner-nonstrict-separation} compare the integrals of $g$ and a perturbation of $g$ defined by\n \\begin{equation}\\label{eq:perturbationdefLowner}\ng_t (x) = \\frac{1}{\\upalpha_t} g \\! \\parenth{\\transpose{A_t}x + a_t },\\;\\text{ for }x\\in\\Re^d. \n \\end{equation}\n\n\n\\begin{lem}[Integral of a perturbation of $g$]\\label{lem:lowner_separation_integral}\nFix $s > 0$, and let $g \\colon \\Re^d \\to [0, +\\infty)$ be a function such that $g^s$ is of finite positive integral.\n\t Let $\\Gamma(t) = (A_t \\oplus \\upalpha_t, a_t),$ $ t \\in [0,1]$ be a curve in $\\mathcal{M}$ with $(A_0 \\oplus \\upalpha_0, a_0)=(\\mathrm{Id}_{d+1},0)$, and assume that the right derivative of $\\Gamma$ at $t=0$ is of the form $(H\\oplus\\gamma,h)$. Define the perturbation $g_t$ of $g$ by \\eqref{eq:perturbationdefLowner}. 
Consider the following statements:\n \\begin{enumerate}[(a)]\n \\item\\label{item:int_lowner_admissible_linearized_in_primal}\n \\[\\iprod{\\parenth{H \\oplus \\gamma,h}}{\\parenth{\\sid,0}} > 0;\\] \n \\item\\label{item:int_lowner_gammaadmissible} \t\n \\[\\int {g}_t^s < \\int g^s\\]\n for all $t \\in (0, \\tau]$ and some $\\tau > 0;$\t\n \\item\\label{item:int_lowner_admissible_linearized_in_primalweak} \n \\[\\iprod{\\parenth{H \\oplus \\gamma,h}}{\\parenth{\\sid,0}} \\geq 0.\\] \n \\end{enumerate}\n Then \\eqref{item:int_lowner_admissible_linearized_in_primal} implies \\eqref{item:int_lowner_gammaadmissible}, and \\eqref{item:int_lowner_gammaadmissible} implies \\eqref{item:int_lowner_admissible_linearized_in_primalweak}.\n\\end{lem}\n\\begin{proof}\nOne has\n\\[\n\t\\int_{\\Re^d} {g}_t^s = \n\t \\upalpha_t^{-s} \\det \\transpose{\\parenth{A_t^{-1}}} \\int_{\\Re^d} g^s = \n\t\\parenth{1 + t \\gamma + \\littleo{t}}^{-s} \\parenth{1 - t\\tr{H} + \\littleo{t} } \\int_{\\Re^d} g^s=\n\\]\n\\[\n=\\parenth{1 - t \\parenth{s \\gamma + \\tr{H}} + \\littleo{t} } \\int_{\\Re^d} g^s\n=\\parenth{1 - t \\iprod{\\parenth{H \\oplus \\gamma,h}}\n\t\t\t{\\parenth{\\sid,0}} + \\littleo{t} } \\int_{\\Re^d} g^s.\n\\]\nThe result follows.\n\\end{proof}\n\n\n\n\\begin{lem}[Polar of a transformed function]\\label{lem:polar_func_composed_affine}\nLet $f \\colon \\Re^d \\to [0, + \\infty)$ be a proper log-concave function,\n$A$ be a non-singular matrix of order $d$, $\\upalpha > 0$ and $a \\in \\Re^d$.\nSet $\\tilde{f}(x) = \\upalpha f(Ax +a)$.\nThen\n\\[\n\\loglego{\\tilde{f}}(y) = \n\\frac{\\loglego{f}(\\transpose{\\parenth{A^{-1}}}y)}{\\upalpha} \\cdot\ne^{\\iprod{\\transpose{\\parenth{A^{-1}}}y}{\\ \\! a}}. \n\\] \n\\end{lem}\n\\begin{proof}\nOne has\n\\[\n\\loglego{\\tilde{f}}(y) = \n\\inf\\limits_{x \\in \\supp \\tilde{f}} \\frac{e^{-\\iprod{x}{y}}}{\\tilde{f}(x)} =\n\\frac{1}{\\upalpha } \\inf\\limits_{x \\in \\supp \\tilde{f}} \\frac{e^{-\\iprod{x}{y}}}{{f}(Ax +a)} =\n\\frac{1}{\\upalpha } \n\\inf\\limits_{x \\in \\supp \\tilde{f}}\n\\frac{e^{-\\iprod{Ax}{\\ \\transpose{\\parenth{A^{-1}}}y}}}{{f}(Ax +a)}=\n\\]\n\\[ \n\\frac{1}{\\upalpha } \n\\inf\\limits_{x \\in \\supp \\tilde{f}}\n\\frac{e^{-\\iprod{Ax + a}{\\ \\transpose{\\parenth{A^{-1}}}y}}}{{f}(Ax +a)} \ne^{\\iprod{a}{\\ \\transpose{\\parenth{A^{-1}}}y}}=\n \\frac{e^{\\iprod{a}{\\ \\transpose{\\parenth{A^{-1}}}y}}}{\\upalpha } \n\\inf\\limits_{z \\in \\supp {f}}\n\\frac{e^{-\\iprod{z}{\\ \\transpose{\\parenth{A^{-1}}}y}}}{{f}(z)} = \n\\] \n\\[\n\\frac{\\loglego{f}(\\transpose{\\parenth{A^{-1}}}y)}{\\upalpha} \\cdot\ne^{\\iprod{\\transpose{\\parenth{A^{-1}}}y}{\\ \\! a}}.\n\\]\n\\end{proof}\n\nUsing \\Href{Lemma}{lem:polar_func_composed_affine} and the compactness of $\\lifting{\\loglego{g}}$ together with \n \\Href{Corollary}{cor:normalcone_nonempty}, we obtain the following. \n\\begin{lem}\n Let functions $f,g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}. Assume $h=g$ is a local minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro}. 
Then the sets $\\contactset{\\loglego{f}}{\\loglego{g}}$ and $\\contactpoint{\\loglego{f}}{\\loglego{g}}$ are non-empty.\n\\end{lem}\n\n\nFinally, we show that the rightmost inequalities in \\eqref{eq:lowner-stict-separation} and \\eqref{eq:lowner-nonstrict-separation} encode that a certain perturbation of $g$ is pointwise above $f$.\n\n\\begin{thm}[Characterization of admissible perturbations]\\label{thm:admissiblelinearized_lowner}\n Let the functions $f,g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}, and let $\\Gamma(t) = (A_t \\oplus \\upalpha_t, a_t), t \\in [0,1]$ be a curve in $\\mathcal{M}$ with $(A_0 \\oplus \\upalpha_0, a_0)=(\\mathrm{Id}_{d+1},0)$, and assume that the right derivative of $\\Gamma$ at $t=0$ is of the form $(H\\oplus\\gamma,h)$. Define the perturbation $g_t$ of $g$ by \\eqref{eq:perturbationdefLowner}. Consider the following statements:\n \\begin{enumerate}[(a)]\n \\item\\label{item:lowner_admissible_linearized_in_primal}\n$\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactoplowner{\\upthing{u}}{\\upthing{v}}}\n< 0$\nfor all $\\parenth{\\upthing{u}, \\upthing{v}} \\in \\contactset{\\loglego{f}}{\\loglego{g}}$.\n \\item\\label{item:lowner_admissible} \n There is $\\varepsilon > 0$ such that $f \\leq {g}_t$ for all $t\\in[0,\\varepsilon]$.\n \\item\\label{item:lowner_admissible_linearized_in_primalweak}\n$\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactoplowner{\\upthing{u}}{\\upthing{v}}}\n\\leq 0$\nfor all $\\parenth{\\upthing{u}, \\upthing{v}} \\in \\contactset{\\loglego{f}}{\\loglego{g}}$.\n \\end{enumerate}\nThen \\eqref{item:lowner_admissible_linearized_in_primal} implies \\eqref{item:lowner_admissible}, and \\eqref{item:lowner_admissible} implies \\eqref{item:lowner_admissible_linearized_in_primalweak}.\n\\end{thm}\n\\begin{proof}\nThe relation $f \\leq {g}_t$ is equivalent to $\\loglego{g_t} \\leq \\loglego{f},$ \nwhich we will proceed to work with. Since $\\lifting{\\loglego{f}}$ and $\\lifting{\\loglego{g}}$ are symmetric about $\\Re^d$, it suffices to consider \n$ \\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{g_t}}.$ \nDefine the homotopy $\\mathcal{H} \\colon \n\\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{g}} \\times [0, 1] \\to {\\mathbb R}^{d+1}$ by \n\\[\n\\mathcal{H}(\\upthing{y},t)=\\parenth{{A_t} y ,\\ \\loglego{g_t} \\! \\parenth{{A_t}y}} \n\\]\nfor all $y \\in \\cl{\\supp \\loglego{g}}$ and $\\upthing{y} = (y, \\loglego{g}(y)).$\nBy \\Href{Lemma}{lem:polar_func_composed_affine},\n\\[\n\\loglego{g_t} ({A_t} y) = \n\\upalpha_t \\loglego{g} \\! \\parenth{{A_t^{-1}}A_t y} \ne^{\\iprod{A_t^{-1} {A_t} y}{\\ a_t}} = \n\\upalpha_t \\loglego{g} (y) e^{\\iprod{y}{a_t}}\n\\]\nand\n$\n {{A_t}} \\parenth{\\supp \\loglego{g}} = \\supp \\loglego{{g}_t}.\n$ \nThat is,\n\\[\n\\mathcal{H}\\! \\parenth{\\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{g}},t} = \\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{g_t}}.\n\\]\n\n\nConsider an arbitrary $u \\in \\cl{\\supp \\loglego{g}}$ and \n$\\upthing{v}=(v,\\nu)\\in\\Re^d \\times {\\mathbb R}$. 
Set $\\upthing{u} = (u, \\loglego{g}(u))$.\nOne has\n\\[\n \\mathcal{H}^\\prime:=\\derivativeatzero\\mathcal{H}(\\upthing{u} , t)=\n \\parenth{ Hu ,\\ \\loglego{g}(u) \\parenth{\\gamma + \\iprod{u}{h}}},\n\\]\nand thus, by \\Href{Definition}{dfn:contactoplowner},\n\\begin{equation}\\label{eq:iprodvhiprodtensor_lowner}\n\\iprod{\\upthing{v}}{\\mathcal{H}^\\prime}=\\iprod{v}{H u }\n + \\nu \\loglego{g}(u) \\gamma + \\nu \\loglego{g}(u) \\iprod{u}{h}\n=\\iprod{\\big(H\\oplus\\gamma,h\\big)}{\\contactoplowner{\\upthing{u}}{\\upthing{v}}}.\n\\end{equation}\n\nConsider the case $\\loglego{g}(u)>0$. \nWe recall the assumption of \\Href{Theorem}{thm:lowner_condition_general} according to which \n $\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$.\nUsing \\Href{Lemma}{lem:locstar_geom_meaning} in identity \\eqref{eq:iprodvhiprodtensor_lowner}, \nwe see that for a fixed $\\upthing{u} \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{f}}$ with non-zero last coordinate, assertion \\eqref{item:lowner_admissible_linearized_in_primal} of the theorem is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\operatorname{int} \\parenth{\\nfcone{{\\lifting{\\loglego{f}}}}{\\upthing{u} }}^{\\circ}.\n\\]\nSimilarly, assertion \\eqref{item:lowner_admissible_linearized_in_primalweak} of \\Href{Theorem}{thm:admissiblelinearized_lowner} is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in\\parenth{\\nfcone{{\\lifting{\\loglego{f}}}}{\\upthing{u} }}^{\\circ}.\n\\]\n\nConsider the case $\\loglego{g}(u)=0$.\nSince $\\supp \\loglego{g}$ is bounded and contains the origin in its interior by the assumption of \\Href{Theorem}{thm:lowner_condition_general}, the origin is contained in the interior of $\\supp \\loglego{f}$. This ensures that at every contact point $\\upthing{u}\\in \\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{g}}\\cap \\mathrm{ess}\\ \\mathrm{graph}\\ {\\loglego{f}}$ with $\\loglego{f}(u) = 0$, every outer normal direction to \n$\\supp{\\loglego{f}}$ has an acute angle with $\\upthing{u} = (u,0)$, and hence, it can be represented by a vector $\\upthing{v}= (v,0)$ with $v \\in {\\mathbb R}^d$ such that \n$\\iprod{\\upthing{u}}{\\upthing{v}}=1$, which yields \n$(\\upthing{u},\\upthing{v})\\in\\contactset{\\loglego{f}}{\\loglego{g}}$.\n\nIt follows from identity \\eqref{eq:iprodvhiprodtensor_lowner} that for a fixed $\\upthing{u}$ whose last coordinate is zero, assertion \\eqref{item:lowner_admissible_linearized_in_primal} of the theorem is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\operatorname{int} \\parenth{\\nfcone{\\supp{\\loglego{f}}}{\\upthing{u}}}^{\\circ},\n\\]\nwhere all the sets and the polarity are meant in $\\Re^d.$\nSimilarly, assertion \\eqref{item:lowner_admissible_linearized_in_primalweak} of \\Href{Theorem}{thm:admissiblelinearized_lowner} is equivalent to\n\\[\n\\mathcal{H}^\\prime \\in \\parenth{\\nfcone{\\supp{\\loglego{f}}}{\\upthing{u}}}^{\\circ}.\n\\]\n\nThe rest of the proof of \\Href{Theorem}{thm:admissiblelinearized_lowner} is identical to the end of the proof of \\Href{Theorem}{thm:admissiblelinearized_john}, and so we omit it.\n\\end{proof}\n\\begin{remark}\n\\Href{Theorem}{thm:admissiblelinearized_lowner}\nis the only place in the proof of \\Href{Theorem}{thm:lowner_condition_general} at which we \nuse the assumption that $\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$. 
\n\\end{remark}\n\t\n\\subsection{Proof of Theorem~\\ref{thm:lowner_condition_general}}\nWe start with assertion \\eqref{item:lowner-local-minimum-implies-glmp} of the theorem.\nAssume that there are no contact pairs and positive weights satisfying equation~\\eqref{eq:functional_glmp-lowner}.\nThen, by \\Href{Lemma}{lem:separation_Lowner_problem}, there exists $\\parenth{H \\oplus \\gamma, h} \\in \\mathcal{W}$ such that\n\\begin{equation}\\label{eq:lowner-concave-separation}\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\parenth{\\sid,0}} > 0\n \\quad\\text{and}\\quad\n \\iprod{\\parenth{H \\oplus \\gamma,h}}\n {\\contactoplowner{\\upthing{u}}{\\upthing{v}}} < 0\n\\end{equation}\nfor all $\\parenth{\\upthing{u}, \\upthing{v}}\\in \\contactset{\\loglego{f}}\n{\\loglego{g}}$.\n\nThe matrix \n\\(\n\\mathrm{Id}_{d} - t {H}\n\\)\nis non-singular and $1 + t \\gamma > 0$ for all $t \\in [0, \\tau]$ for some positive $\\tau.$ \n Set\n\\( A_t = {{\\parenth{\\mathrm{Id}_{d} - t {H}}^{-1}}}\\) and\n\t\t\\[\n\t\t\tg_t(x)=\\frac{1}{1 + t \\gamma} \\ g\\! \\parenth{\\transpose{A_t} x + t h}.\n\t\t\\]\nClearly, $g_t \\in \\funpos{g}$ for all $t \\in [0, \\tau].$ \nBy implication \\eqref{item:int_lowner_admissible_linearized_in_primal} $\\Rightarrow$ \\eqref{item:int_lowner_gammaadmissible} of \\Href{Lemma}{lem:lowner_separation_integral}, \n\\[\n \\int g_t^s < \\int g^s\n\\]\nfor all $t \\in (0, \\tau]$ and some $\\tau > 0$.\n\nUsing the implication \\eqref{item:lowner_admissible_linearized_in_primal} $\\Rightarrow$ \\eqref{item:lowner_admissible} of \\Href{Theorem}{thm:admissiblelinearized_lowner},\none gets that $f \\leq g_t$ for all sufficiently small $t$. \nThus, $g$ is not a local minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro},\ncompleting the proof of assertion \\eqref{item:lowner-local-minimum-implies-glmp} of \\Href{Theorem}{thm:lowner_condition_general}.\n\n\n\nWe proceed with assertion \\eqref{item:glmp-lowner-implies-global-minimum} of the theorem.\nAssume that $g$ is not a global minimizer in Positive position L\\\"owner $s$-problem \\eqref{eq:lowner_problem_pos}. That is, there exist a positive definite matrix $A$,\n$\\gamma \\in {\\mathbb R}, h \\in \\Re^d,$ such that \nthe function $g_1$ defined by\n\\[\ng_1(x) = e^{-\\gamma} g\\! \\parenth{A x + h}\\;\\text{ for }\\; x\\in\\Re^d\n\\]\nsatisfies $f \\leq g_1$ and $\\int_{\\Re^d} g_1^s < \\int_{\\Re^d} g^s$.\nWe will use $g_1$ to define a perturbation $g_t$ of $g$ (a curve in $\\mathcal{M}$) that is above $f$ with smaller integral, and then, taking the derivative of that perturbation at the starting point $t=0$, we will find $\\parenth{H\\oplus\\gamma,h}\\in\\mathcal{W}$ that separates $\\parenth{\\sid, 0}$ from the set $\\contactopsetlowner{\\loglego{f}}{\\loglego{g}}$. This, by \\Href{Lemma}{lem:separation_Lowner_problem}, will yield the assertion.\n\nDefine $H = A - \\mathrm{Id}_d.$ \n We begin by showing that \n\\begin{equation}\\label{eq:lowner_suff_integral_separation}\n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} > 0.\n\\end{equation}\nFor any $\\delta > 0$, the function $\\tilde{g}$ defined by \n\\[\n\\tilde{g}(x) = e^{-\\gamma + \\delta} g\\parenth{Ax + h}\n\\]\nsatisfies the relation $f \\leq \\tilde{g}$. 
Moreover, for a sufficiently small $\\delta$, we also have\n$\\int_{\\Re^d} \\tilde{g}^s < \\int_{\\Re^d} g^s$.\n\nDefine\n\\[\n\\tilde{g}_t (x) = e^{-t(\\gamma - \\delta)}\ng\\parenth{\\parenth{\\mathrm{Id}_d + t H} x + th} \\in \n\\funppos{g}.\n\\] \t\t\nBy \\Href{Lemma}{lem:outer-interpolation}, \n$f \\leq \\tilde{g}_t$ for all $t \\in [0,1],$ \tand\n\\[\n\t\t\t\t\\int_{\\Re^d} {\\tilde{g}_t^s} \\leq\n\t\t\t\t\t\\parenth{ \\int_{\\Re^d} {g}^s}^{1-t}\n\t\t\t\t\t \\parenth{\\int_{\\Re^d} \\tilde{g}^s}^{t}\n\t\t\t\t\t\\leq \\int_{\\Re^d} {g^s}.\n\\]\nUsing implication \\eqref{item:int_lowner_gammaadmissible} $\\Rightarrow$ \\eqref{item:int_lowner_admissible_linearized_in_primalweak} of \\Href{Lemma}{lem:lowner_separation_integral}, we have\n\\[\n 0 \\leq \\iprod{\\parenth{H \\oplus (\\gamma - \\delta), h}}{\\parenth{\\sid, 0}} = \n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} - \\delta s. \n\\]\nWe conclude that \n\\begin{equation*}\n\\iprod{\\parenth{H \\oplus \\gamma, h}}{\\parenth{\\sid, 0}} \\geq \\delta s > 0,\n\\end{equation*}\nand \\eqref{eq:lowner_suff_integral_separation} follows.\n\nFor $t \\in [0,1),$ define \n\\[\n{g}_t (x) = e^{-t\\gamma} g\\parenth{\\parenth{\\mathrm{Id}_d + t H}x + th}.\n\\] \t\t\nThat is, $g_0 = g,$ and ${g}_t \\in \n\\funppos{g}$ for all $t \\in [0,1).$\nBy the choice of $g_1$ and by \\Href{Lemma}{lem:outer-interpolation}, one sees that $f \\leq {g}_t$ for all $t \\in [0,1].$\nThe implication \\eqref{item:lowner_admissible} $\\Rightarrow$ \\eqref{item:lowner_admissible_linearized_in_primalweak} in \\Href{Theorem}{thm:admissiblelinearized_lowner} implies that\n$\\iprod{\\parenth{H \\oplus \\gamma,h}}\n{\\contactoplowner{\\upthing{u}}{\\upthing{v}}} \\leq 0\n$\nfor all $ \\parenth{\\upthing{u}, \\upthing{v}} \\in \\contactset{\\loglego{f}}{\\loglego{g}}$.\nCombining this with inequality \\eqref{eq:lowner_suff_integral_separation},\n\\Href{Lemma}{lem:separation_Lowner_problem} yields assertion \\eqref{item:glmp-lowner-implies-global-minimum} of \\Href{Theorem}{thm:lowner_condition_general}, completing the proof of \\Href{Theorem}{thm:lowner_condition_general}.\n\n\n\n\n\n\\section{Existence and uniqueness of solutions}\\label{sec:existence_uniqueness}\n\nFor any proper log-concave function $f \\colon \\Re^d\\to[0,\\infty),$\nthere exists a positive constant $C$ such that the \nintegral of $f$ over any line is\nat most $C$. Indeed, this follows from the existence of constants\n $\\Theta, \\nu > 0$ depending only on $f$ that satisfy \n\\begin{equation}\\label{eq:proper_log-concave_bound}\n\tf(x) \\leq \\Theta e^{-\\nu \\enorm{x}} \n\\end{equation} \nfor all $x\\in\\Re^d$;\nsee \\cite[Lemma~2.2.1]{brazitikos2014geometry}.\nWe will use the notation\n\\[\nC_f=\\sup_{\\ell} \\int_{\\ell} f,\n\\]\nwhere the supremum is taken over all lines $\\ell$ in $\\Re^d$.\n\nThe following technical fact, essentially a rephrasing of Lemma~3.2 of \n\\cite{ivanov2022functional}, will allow us to use compactness arguments in finding the optima in the John and L\\\"owner $s$-problems.\n\n\\begin{lem}[Boundedness of the admissible set]\\label{lem:boundedness}\nFor any proper log-concave function $f \\colon \\Re^d\\to[0,\\infty)$ and any $\\delta>0$, \nthere exist $\\vartheta,\\rho,\\rho_1>0$ with the following property. 
If \nfor\na proper log-concave function \n$g \\colon \\Re^d \\to [0,\\infty)$ with $g \\leq g(0) =1$ and \n$(A\\oplus\\upalpha,a)\\in\\mathcal{M}$, the function\n$w \\colon \\Re^d \\to [0,\\infty)$ given by \n\\[\nw(x) = \\upalpha g\\parenth{A^{-1}(x-a)}\n\\]\nsatisfies $w \\leq f$ and \n$ \\int_{\\Re^d} w \\geq\\delta$, then the following inequalities hold:\n\\begin{equation}\\label{eq:john_alpha_a_universal_bound}\n \\vartheta\\leq\\upalpha \\leq \\norm{f}\n\\quad \\text{and} \\quad \n \\enorm{a} \\leq \\rho,\n\\end{equation}\nand\n\\begin{equation}\n\\label{eq:john_comparison_operator}\n \\frac{\\rho_1}{ \\int_{\\Re^d} {g}} \n\\parenth{\\frac{C_{f}}{ C_{g}}}^{1-d} \\leq\n \\frac{1}{\\norm{A^{-1}}} \\leq \\norm{A} \\leq \n\\frac{C_{f}}{\\vartheta C_{g}}.\n\\end{equation}\nwhere $C_{g}$ is the maximum of the integral of the restriction of \n$g$ to a line in $\\Re^d$.\n\\end{lem}\n\\begin{proof}\nThe proof is a minor modification of the proof of a particular case of Lemma~3.2 of \\cite{ivanov2022functional}. \nObviously, $\\upalpha \\leq \\norm{f}.$\nTo bound $\\upalpha$ from below, we fix $\\vartheta$ with\n$\\upalpha \\leq \\vartheta.$ Then \n$w \\leq \\vartheta$, and thus,\n\\[\n \\int_{\\Re^d} w \\leq\n\\int_{\\Re^d} \\min\\{f(x), \\vartheta\\} \\di x.\n\\]\nSince $f$ is a non-negative function of finite integral, the last expression is \nless than $\\delta$ if $\\vartheta$ is sufficiently small. Thus, the leftmost \ninequality in \\eqref{eq:john_alpha_a_universal_bound} holds. \nSince $w(a) = \\upalpha,$ we conclude that\n$a \\in [f \\geq \\vartheta]$ completing the proof of \n\\eqref{eq:john_alpha_a_universal_bound}.\n\n\nWe proceed with inequality~\\eqref{eq:john_comparison_operator}.\nClearly, $C_f \\geq C_w = \\upalpha \\norm{A} C_{g}\\geq \\vartheta \\norm{A} C_{g}$.\n Thus, the rightmost relation in \n\\eqref{eq:john_comparison_operator} holds.\n\nBy the assumption, we have\n\\[\n \\delta \\leq \\int_{\\Re^d} w = \\upalpha \\abs{\\det A} \\cdot\n \\int_{\\Re^d} g .\n\\]\nLet $\\beta$ be the smallest singular value of $A$.\nBy the previous inequality and \nsince $\\upalpha \\in [\\vartheta, \\norm{f}],$ we have\n\\[\n0 < \\frac{\\delta}{\\norm{f}} \\frac{1}{ \\int_{\\Re^d} g} \\leq \n\\abs{\\det A} \\leq \\beta \\cdot \\norm{A}^{d-1}.\n\\]\nBy the rightmost relation in \\eqref{eq:john_comparison_operator}, the existence of \n$\\rho_1$ follows.\n\\end{proof}\n\n\n\\subsection{Existence and uniqueness in the (Positive) John \\texorpdfstring{$s$}{s}-problem}\n\n\\begin{prop}\\label{prop:existence_uniqueness_john}\nLet $f, g \\colon \\Re^d\\to[0,\\infty)$ be two proper log-concave functions such that \n$g \\leq f.$\nThen John $s$-problem \\eqref{eq:john_problem_intro}\nand Positive John $s$-problem \\eqref{eq:john_problem_pos} have solutions.\nMoreover, if $g$ is of bounded support, then the solution to {Positive John $s$-problem} \\eqref{eq:john_problem_pos} is unique.\n\\end{prop}\n\nWe note that as was shown in \\cite{ivanov2022functional}, the solution \nto the {Positive John $s$-problem} is not necessarily unique without assumption on the boundedness of the support. For example, it is not necessarily unique for the standard Gaussian density. \n\n\\begin{proof}[Proof of \\Href{Proposition}{prop:existence_uniqueness_john}]\nThe existence of the solutions follows from \\Href{Lemma}{lem:boundedness} and a routine compactness argument. 
\nSo, we only need to show the uniqueness of the solution to the {Positive John $s$-problem} \\eqref{eq:john_problem_pos}.\nLet $A_1$ and $A_2$ be positive definite matrices of order $d$, $a_1, a_2 \\in \\Re^d$, and\n$\\upalpha_1, \\upalpha_2>0$ be such that the functions \n\\[\nh_1(x) = \\upalpha_1 {g}\\! \\parenth{A_{1}^{-1}(x - a_1)}\n\\quad \\text{and} \\quad \nh_2(x) = \\upalpha_2 {g}\\! \\parenth{A_{2}^{-1}(x - a_2)}\n\\]\nare the solutions to Positive John $s$-problem \\eqref{eq:john_problem_pos}.\nIn particular, the integrals of the $s$-th powers of these functions are equal. \nIf $A_1 \\neq A_2$, then applying \\Href{Lemma}{lem:inner-function-interpolation} to $h_1$ and $h_2$ with $\\beta_1 = \\beta_2 = 1/2$ and using the equality condition in \\eqref{eq:inner-function-interpolation-2} would yield a position of $g$ in $\\funppos{g}$ that is below $f$ and whose integral of the $s$-th power is strictly greater than $\\int h_1^s$, contradicting the maximality of $h_1$. Hence, $A_1 = A_2$, and consequently, $\\upalpha_1 = \\upalpha_2$ as well. \nThat is, the liftings of $h_1$ and $h_2$ are translates of each other. Assume that $h_1 \\neq h_2$, that is, $a_1 \\neq a_2$, and set $w = (a_2 - a_1)/2 \\neq 0$. The log-concavity of $f$ implies that the set $\\lifting{h_1} + [0,2w]$ is contained in \n$\\lifting{f}$. \nWe claim that there is a position $h$ of ${g}$ below $f$ such that $\\int h^s> \\int h_1^s$. \nIndeed, consider the function $h_{1.5}(x) = h_1 (x - w)$. Clearly, \n$\\lifting{h_{1.5}} \\subset \\lifting{h_1} + [0,2w] \\subset \\lifting{f}$.\nLet $h_{1.5}$ attain its maximum at $z$. This means that $z$ belongs to all non-empty level-sets of $h_{1.5}$. These level sets are compact convex sets, since $h_{1.5}$ is a log-concave function of compact support. Let $S_{\\varepsilon}$ be the linear transformation that scales $\\Re^d$ in the direction of $w$ by the factor $1 + \\varepsilon$. Then, for a sufficiently small positive $\\varepsilon$, the inclusion \n\\[\nS_{\\varepsilon} \\parenth{\\left[ h_{1.5} \\geq \\Theta\\right] - z} \\subset \\left[ h_1 \\geq \\Theta\\right] + [0, 2w]\n\\] \nholds for all $\\Theta \\in (0, \\norm{h_{1.5}}]$.\nThat is, $S_{\\varepsilon} \\parenth{\\lifting{h_{1.5}} - z} \\subset \\lifting{f}$.\nFinally, the set on the left-hand side of the last inclusion is the lifting of a position $h$ of ${g}$ below $f$ with $\\int h^s = (1 + \\varepsilon) \\int h_{1.5}^s > \\int h_1^s$, which proves the claim and completes the proof of \\Href{Proposition}{prop:existence_uniqueness_john}.\n\\end{proof}\n\n\n\\subsection{Existence in the (Positive) L\\\"owner \\texorpdfstring{$s$}{s}-problem}\n\nBy a routine limiting argument, \\Href{Lemma}{lem:boundedness} yields the following.\n\\begin{prop}\\label{prop:existence_lowner}\nLet $f, g \\colon \\Re^d\\to[0,\\infty)$ be proper log-concave functions such that \n$f \\leq g$. Then there are solutions to L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro} and to {Positive L\\\"owner $s$-problem} \\eqref{eq:lowner_problem_pos}.\n\\end{prop}\n\nWe note that as was shown in \\cite{ivanov2021functional}, the solution \nto {Positive L\\\"owner $s$-problem} is not necessarily unique even for a radially symmetric function $g$. \n\n\n\n\n\n\n\n\n\n\\section{The normal cone and the subdifferential}\\label{sec:normalcone_subdifferential}\n\nThis section contains properties of the normal cone of log-concave functions that will be needed in the next section, where we discuss when the rather technical conditions of Theorems~\\ref{thm:john_condition_general} and \\ref{thm:lowner_condition_general} hold.\n\n\\subsection{Subdifferential}\nWe recall several definitions and properties of the subdifferential. 
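\nA guiding example to keep in mind is the Euclidean norm $\\psi(x) = \\enorm{x}$ on $\\Re^d$: at every $x \\neq 0$, the subdifferential defined below is the singleton $\\{x/\\enorm{x}\\}$, while at the origin\n\\[\n\\partial \\psi(0) = \\{p \\in \\Re^d \\colon \\enorm{p} \\leq 1\\},\n\\]\nthe closed unit ball. In particular, the subdifferential may contain more than one element at a point where $\\psi$ is not differentiable.\n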
\n\nA vector $p$ is said to be a \\emph{subgradient} of a function \n$\\psi \\colon {\\mathbb R}^d \\to {\\mathbb R} \\cup \\{+ \\infty\\}$ at the point $x$ if\n\\begin{equation*}\n\\psi(y) \\geq \\psi(x) + \\iprod{p}{y-x}\n\\end{equation*}\nfor all $y \\in {\\mathbb R}^d$.\nThe set of all subgradients of $\\psi$ at $x$ is called the \\emph{subdifferential}\nof $\\psi$ at $x$ and is denoted by $\\partial \\psi(x)$. By definition, we have the following.\n\\begin{lem}[Subdifferential and normals of the epigraph]\\label{lem:subdif_via_ncone}\nLet $\\psi$ be a convex function on ${\\mathbb R}^d.$ Then $p \\in \\partial \\psi(x)$ if and only if\n$(p, -1) \\in \\nfcone{\\operatorname{epi} \\psi}{(x, \\psi(x))}.$\n\\end{lem}\n The following lemma is a basic property of the subdifferential (see \\cite[Theorem 23.5]{rockafellar1970convex}) relating it to the Legendre dual.\n\\begin{lem}[Subdifferential and Legendre dual]\\label{lem:subdif_basics}\n Let $\\psi: {\\mathbb R}^d \\to {\\mathbb R} \\cup \\{+\\infty\\}$ be a lower semi-continuous convex function. \n Then the following three conditions on vectors $u, p \\in {\\mathbb R}^d$ are equivalent:\n \\begin{enumerate}\n\\item $p \\in \\partial \\psi(u);$\n\\item $u \\in \\partial\\slogleg[] \\psi(p);$\n\\item \n$ \n\\psi(u) + \\slogleg[]\\psi(p) = \\iprod{p}{u}.\n$\n\\end{enumerate} \n\\end{lem}\n\nAlso, we will use the following well-known fact.\n\\begin{lem}\\label{lem:subdif_lipchitz}\n Let $\\delta > 0$, and let $\\psi: {\\mathbb R}^d \\to {\\mathbb R} \\cup \\{+\\infty\\}$ be a lower semi-continuous convex function whose effective domain contains $\\delta \\ball{d}$ in its interior. \n Then there is a constant $L$ such that $\\enorm{p} \\leq L$ for all \n $p \\in \\partial \\psi(u)$ and $u \\in \\delta \\ball{d}.$\n\\end{lem}\n \n \\subsection{Explicit formula for contact pairs}\n \\label{subsec:normal_cone_via_subdiff}\n \n As an immediate consequence of \\Href{Lemma}{lem:nfcone}, we obtain\n\\begin{lem}[Horizontal normals of $\\lifting{f}$]\\label{lem:ncone_flat_normal}\nLet $f \\colon \\Re^d \\to [0, +\\infty)$ be an upper semi-continuous log-concave function containing the origin in the interior of its support. \nLet $\\upthing{u} = (u, f(u)) \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f}$ and $\\upthing{v} = (v, 0) \\in \\nfcone{\\lifting{f}}{\\upthing{u}}$ with $\\iprod{\\upthing{u}}{\\upthing{v}} = 1.$ \nThen \n\\[\n \\upthing{v} = \\frac{(p,0)}{\\iprod{p}{u}} \n \\quad \\text{and} \\quad \\iprod{p}{u} > 0\n \\]\n for some non-zero $p \\in \\nfcone{\\supp f}{u}.$\n\\end{lem} \nLemmas~\\ref{lem:nfcone} and \\ref{lem:subdif_via_ncone}\nyield the following.\n\\begin{lem}[Non-horizontal normals of $\\lifting{f}$]\n\\label{lem:v_via_grad}\nLet $\\psi \\colon \\Re^d \\to {\\mathbb R} \\cup \\{+\\infty\\}$ be a convex function containing $u$ \nin its domain. Set $f = e^{-\\psi}$ and $\\upthing{u} = (u, f(u))$. Let \n$\\upthing{v}= (v, \\nu)$ with $\\nu \\neq 0.$\nThe following assertions are equivalent:\n\\begin{enumerate}\n\\item $\\upthing{v} \\in \\nfcone{\\lifting{f}}{\\upthing{u}}$ and\n $\\iprod{\\upthing{u}}{\\upthing{v}} =1 $;\n\\item \\[\n \\upthing{v} = \n \\frac{\\parenth{p, \\frac{1}{f(u)} }}{1 + \\iprod{p}{u}} \n \\quad \\text{and} \\quad 1 + \\iprod{p}{u} > 0\n \\]\nfor some $p \\in \\partial \\psi(u).$ \n\\end{enumerate} \n\\end{lem}\n\nWe conclude the following.\n\n\\begin{lem}\\label{lem:starlike_criteria}\nLet $g\\colon \\Re^d \\to [0, + \\infty)$ be a proper log-concave function containing the origin in the interior of its support. 
Then, a set $U \\subset \\Re^d$ is a star-like set with respect to~$g$ if and only if the inequality $\\iprod{p}{u} > -1$ holds for all $u \\in U \\cap \\supp g$ and\n $p \\in \\partial(-\\ln g)(u).$\n\\end{lem}\n\\begin{proof}\nFix $u \\in U,$ denote $\\upthing{u} = (u, g(u)),$ and let $\\upthing{v}= \\parenth{v, \\frac{\\nu}{g(u)}} \\in \\nfcone{\\lifting{g}}{\\upthing{u}}.$ If $\\nu \\neq 0,$ then Lemmas~\\ref{lem:nfcone} and \\ref{lem:subdif_via_ncone} yield that $\\nu > 0$ and \n$v = \\nu p$ for some $p \\in \\partial(-\\ln g)(u).$ \nHence, $\\iprod{\\upthing{v}}{\\upthing{u}} = \\nu (1 + \\iprod{p}{u}).$\nIf $\\nu = 0,$ then $u$ is a point of the boundary of $\\supp g.$ By \n\\Href{Lemma}{lem:nfcone} and since the origin is in the interior of the convex set $\\supp g,$ the inequality $\\iprod{\\upthing{v}}{\\upthing{u}} = \\iprod{v}{u} > 0$ holds.\n\\end{proof}\n\n\n\\begin{remark}\nLet $g = e^{-\\psi} \\colon \\Re^d \\to [0, + \\infty)$ be an upper semi-continuous function, \nand let $u$ be a point on the boundary of $\\supp g$ at which $\\psi$ is not subdifferentiable. It is not hard to show that $\\nfcone{\\lifting{g}}{(u, g(u))} = \\nfcone{\\supp g }{u}$ holds in this case. \n\\end{remark}\n\n\n\n\n\n\\section{Properties of the set of contact pairs}\\label{sec:boundedcontactpairs}\n\nThe main topic of this section is the question of when the set of contact pairs \n$\\contactset{f}{g}$ is bounded in $\\Re^{d+1} \\times \\Re^{d+1}$, as this is a crucial \ncondition in Theorems~\\ref{thm:john_condition_general} and \n\\ref{thm:lowner_condition_general}.\n\nAssume that $g \\leq f$. Then $\\contactset{f}{g} \\subseteq \\contactset{g}{g}$ and hence,\nit suffices to impose conditions on $g$ to guarantee the boundedness of $\\contactset{f}{g}$. \n\n\\subsection{A difficulty: flat zeros}\n\nIn order to explain the difficulty of guaranteeing that the contact set is bounded, we first return to the setting of convex sets. \nAssume that $K$ is a compact convex subset of $\\Re^d$ containing the origin in the interior. \nThen for any $u \\in \\partial{K}$, the set \n\\[\nN_u =\\{v \\colon \\iprod{u}{v} = 1 \\quad \\text{and} \\quad v \\in \\nfcone{K}{u}\\}\n\\]\n is a closed subset of the boundary of ${K}^{\\circ}$.\n Hence, the set of contact pairs\n \\[\n\\mathcal{C} = \\{(u, v) \\colon u \\in \\partial{K}, \\; v \\in N_u\\} \n\\]\nis a closed subset of the compact set $\\partial{K}\\times\\partial{{K}^{\\circ}}$, and hence, is compact. \n\nNext, consider the epigraph of a convex function $\\psi$ with bounded domain.\nIt will be an unbounded convex set in $\\Re^{d+1}$. So, when we turn to the lifting of \nthe corresponding log-concave function $g=e^{-\\psi}$, we may find ill-behaved points. Namely, at any \npoint $u \\in \\cl{\\dom \\psi} \\setminus \n\\dom \\psi$, the normal cone of the epigraph of $\\psi$ is undefined at $(u, \n\\psi(u))$, since there is no such point, as $\\psi(u)=\\infty$. However, the \nnormal cone to the lifting of $g$ at $(u, g(u)) = (u, 0)$ \nis well defined and non-empty by \\Href{Corollary}{cor:normalcone_nonempty}. Most importantly, $\\nfcone{\\lifting{g}}{(u, 0)}$ \nmay contain $e_{d+1}$. It is this particular case that requires additional care. 
\n\n\\begin{dfn}\\label{def:flatzero}\n For a log-concave function $g\\colon\\Re^d\\to\\Re$, we call a point $u\\in\\cl{\\supp g}$ a \\emph{flat zero} if $g(u)=0$ and $e_{d+1}\\in\\nfcone{\\lifting{g}}{(u, 0)}$.\n\\end{dfn}\n\nTo see why flat zeros pose a difficulty, assume that $g$ and $u$ are as in the definition above, and $f$ is a function with $g\\leq f$ such that $f=g$ on some neighborhood of $u$. Let $v\\in\\Re^d$ be an outer normal vector of the support hyperplane of $\\supp f$ at $u$ with $\\iprod{u}{v}=1$. \nThen $\\big((u,0),(v,\\nu)\\big)$ is in $\\contactsetnr{f}{g}$ for all $\\nu\\in\\Re$, and hence, $\\contactset{f}{g}$ is not bounded.\n\n\nConsider the following examples $g_1,g_2 \\colon \\Re^d \\to [0, +\\infty)$:\n\\[\ng_1(u) = \n \\begin{cases}\n\\parenth{1 - \\enorm{u}^2}^2,& \n\\text{ if } \\enorm{u} < 1\\\\\n0,&\\text{ otherwise}.\n \\end{cases}\n\\]\n\\[\ng_2(u) = \n \\begin{cases}\n{e^{-\\frac{1}{1 - \\enorm{u}}}},& \n\\text{ if } \\enorm{u} < 1\\\\\n0,&\\text{ otherwise}.\n\\end{cases}\n\\]\n\nClearly, both $g_1$ and $g_2$ are proper log-concave functions of bounded support.\nIt is easy to see that, for both of them, \n$\\nfcone{\\lifting{g_i}}{(u,0)} =\\{(\\upalpha u,\\nu) \\colon \\upalpha \\geq 0, \\nu \\in {\\mathbb R} \\}$ for any unit vector\n$u \\in \\Re^d$. That is, the sets $\\contactsetnr{g_1}{g_1}$ and \n$\\contactsetnr{g_2}{g_2}$ are unbounded, and every unit vector is a flat zero. Note that for any fixed $\\upthing{u}=(u,0)$ with $\\enorm{u}=1$, the set $\\{(\\upthing{u},\\upthing{v})\\in\\contactset{g_i}{g_i}\\}$ is bounded, since it contains only horizontal vectors $\\upthing{v}$; however, as $\\enorm{u}<1$ approaches $1$, the set $\\{(\\upthing{u},\\upthing{v})\\in\\contactset{g_i}{g_i}\\}$ becomes arbitrarily large. Hence, $\\contactset{g_i}{g_i}$ is not bounded.\n\n\n\nYet, there is a major difference between these two functions. The function $g_1$ was studied in \\cite{ivanov2022functional}, and the conditions of optimality equivalent to those of \\Href{Theorem}{thm:john_condition_general} were obtained therein for an arbitrary proper log-concave function $f$. This was possible since $g_1$ is $q$-concave with $q=1/4$ (see page~\\pageref{page:qconcave} for the definition), and hence, as we will see, its flat zeros can be removed by taking the $q$-th power. On the other hand, $g_2$ is not $q$-concave for any $q>0$. \n\n\n\\subsection{Sufficient conditions}\\label{subsec:sufficient_conditions}\n\n\nIn this subsection, we show that if $g(0)$ is close to $\\max g$, then the conditions in our main theorems (\\Href{Theorems}{thm:john_condition_general} and \\ref{thm:lowner_condition_general}) hold essentially with any $f$.\n\n\\begin{lem}\\label{lem:sufficient_condition_locstar_and_bounded}\nLet $g = e^{-\\psi} \\colon \\Re^d \\to [0, +\\infty)$ be a function satisfying our Basic Assumptions (see page~\\pageref{assumptions:basic}). \nDenote the minimum of $\\psi$ by $m$. Assume that the inequality $\\psi(0) < m+1$ holds.\nThen $\\supp g$ is a star-like set with respect to~$g$. Additionally, if $U$ is a subset of $\\Re^d$ such that\n$\\inf\\limits_{x \\in \\ \\!\\! U \\ \\! \\cap \\ \\! \\supp g } g(x) > 0,$ then the \nset\n$\nC = \\big\\{\n((u, g(u)), \\upthing{v}) \\in \\contactset{g}{g} \\colon u \\in U \n\\big\\}\n$ is bounded. 
\n\\end{lem}\n\\begin{proof}\n Fix $\\upthing{u}=(u,g(u)) \\in \\mathrm{ess}\\ \\mathrm{graph}\\ g$ and let \n$\\upthing{v} = (v,\\nu) \\in \\nfcone{\\lifting{g}}{(u,g(u))} \\setminus \\{0\\}.$ \n\nConsider the case when $\\nu=0$. By \\Href{Lemma}{lem:nfcone}, $u$ belongs to the boundary of $\\supp{g}$ and $v \\in \\nfcone{\\supp{g}}{u} \\subset \\Re^d$. \nSince $\\supp g$ is a convex set containing the origin in its interior, \nthere are positive constants $\\delta_0$ and $\\delta_1$ independent of $u$ and $\\upthing{v}$ such that $\\enorm{\\upthing{v}} < \\delta_0$ and $\\iprod{\\upthing{v}}{\\upthing{u}} > \\delta_1$.\n\nNow, assume that $\\nu \\neq 0$.\nBy \\Href{Lemma}{lem:nfcone} and the convexity of $\\psi$, we have that $\\nu > 0$ and \n\\[\n \\iprod{v}{u} \\geq {\\nu}{g(u)}\\parenth{ \\psi(u) - \\psi (0)} \\geq\n {\\nu}{g(u)} \\parenth{ m - \\psi (0)}.\n\\]\nHence,\n\\begin{equation}\\label{eq:admissible_center}\n\\iprod{\\upthing{v}}{\\upthing{u}} = \\iprod{v}{u} + \\nu g(u) \\geq\n{\\nu}{g(u)} \\parenth{ m - \\psi (0) + 1} > 0.\n\\end{equation}\nWe conclude that $\\supp g$ is a star-like set with respect to~$g$.\n\nSet\n$L= \\inf\\limits_{x \\in U \\cap \\supp g} g(x).$ \nNow, assume that $\\iprod{\\upthing{v}}{\\upthing{u}} = 1.$\nThen \\eqref{eq:admissible_center} yields\n\\[\n\\nu \\leq \\frac{1}{\\parenth{ m - \\psi (0) + 1}L }\n\\]\nfor any $( (u, g(u)), \\upthing{v}) \\in \\contactset{g}{g}$ such that $u \\in U$. \nThus, $\\nu$ cannot be too large.\n\\Href{Lemma}{lem:subdif_lipchitz} and \\Href{Lemma}{lem:nfcone} imply that the set \nof contact pairs\n$\n((u,g(u)), \\upthing{v}) \\in \\contactset{g}{g},\n$\n where $u$ is in a sufficiently small neighborhood $V$ of the origin, is bounded. \n Using convexity again and identity \\eqref{eq:normalconelevelset} of \\Href{Lemma}{lem:nfcone}, we see that \n $\\enorm{v}$ cannot be too large outside $V,$ and thus, the proof of the lemma is complete.\n\\end{proof}\n\nAs a direct consequence of this result, we obtain the following.\n\\begin{cor}\\label{cor:suffiient_cond_starlike+bounded}\nLet $g_b \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous function such that \n$\\logconc{g_b}$ satisfies our Basic Assumptions (see page~\\pageref{assumptions:basic}). \nDenote the minimum of $-\\ln \\logconc{g_b}$ by $m.$ Assume \nthe inequality \n\\[\n-\\ln \\logconc{g_b}(0) < m +1 \n\\]\nholds. \nThen for any proper log-concave function $f \\colon \\Re^d \\to [0, +\\infty),$ \nthe set $\\contactpoint{f}{g_b}$ is a star-like set with respect to~$f$.\nMoreover, if the infimum of $g_b$ taken over the set $\\contactpoint{f}{g_b} \\cap \\supp g_b$ is strictly greater than zero, then the set\n$\\contactset{f}{g_b}$ is bounded. \n\\end{cor}\n\\begin{remark}\nIf $\\logconc{g_b}$ is of compact support in the assertion of \\Href{Corollary}{cor:suffiient_cond_starlike+bounded}, then for any function $f$ such that the set\n$S = \\contactpoint{f}{g_b} \\cap \\supp g_b$ is non-empty, the infimum of $g_b$ taken over $S$ is strictly greater than zero.\n\\end{remark}\n\n\\subsection{Taking the reduced set of contact pairs yields no loss}\n\n\nWe recall that in the assertions of our theorems formulated in the Introduction,\nwe consider the full set of contact pairs, $\\contactsetnr{f}{g}$, and not the \\emph{reduced} set $\\contactset{f}{g}$. 
The following simple observation allows us to do so.\n\n\\begin{lem}[The vertical component of $\\upthing{v}$ may be ignored at a zero]\\label{lem:flattened_normal_at_zero}\nLet $f \\colon \\Re^d \\to [0, +\\infty)$ be an upper semi-continuous log-concave \nfunction, and $u \\in \\cl{\\supp f}$ be such that $f(u) = 0$. Let $\\upthing{v} \n= (v, \\nu) \\in \\nfcone{\\lifting{f}}{\\upthing{u}}$, where $\\upthing{u}=(u, \nf(u))$, be such that $\\iprod{\\upthing{v}}{\\upthing{u}} = 1$.\n Then $v \\in \\nfcone{\\supp f}{u}$, and\n \\[\n \\contactopjohn{\\upthing{u}}{\\upthing{v}} = \\contactopjohn{\\upthing{u}}{(v,0)} \n \\quad \\text{and} \\quad \\contactoplowner{\\upthing{u}}{\\upthing{v}} = \\contactoplowner{\\upthing{u}}{(v,0)}.\n \\]\n\\end{lem}\n\nThus, if no contact pairs from $\\contactsetnr{f}{g}$ (resp., $\\contactsetnr{\\loglego{f}}{\\loglego{g}}$) satisfy the equations in \n\\Href{Theorem}{thm:john_condition_general} (resp., \n\\Href{Theorem}{thm:lowner_condition_general}), then one can consider only horizontal normals at the zero level when studying separation of the set of extended contact operators from the point $\\parenth{\\mathrm{Id}_{d} \\oplus s, 0}$ in $\\mathcal{W}$.\n\nA more geometric, less algebraic explanation of this fact is that in the proof of \n\\Href{Theorem}{thm:john_condition_general} (resp., \n\\Href{Theorem}{thm:lowner_condition_general}), a point $ \\upthing{u}=(u, 0)$ remains in \n$\\Re^d$ under the corresponding homotopy. That is, the last coordinate of the \nnormal vector does not play a role.\n\n\n\n\n\n\\section{Radially symmetric functions}\\label{sec:radially_symmetric}\n\nWe call a function $f$ on $\\Re^d$ \\emph{radially symmetric},\nif it is of the form $f(x) = F(\\enorm{x})$, where $F$ is a function on \n$[0,+\\infty)$. Clearly, the sets of positions and positive positions of a radially symmetric function coincide. Consequently, the identities \n\\eqref{eq:functional_glmp} and \\eqref{eq:functional_glmp-lowner} are the necessary and sufficient condition in \\Href{Theorem}{thm:john_condition_general} and \n\\Href{Theorem}{thm:lowner_condition_general}, respectively. This way, the \nresults of \\cite{ivanov2022functional} can be recovered from \\Href{Theorem}{thm:john_condition_general}.\n\n\n\n\n\n\n\n\n\\section{The \\texorpdfstring{$q$}{q}-concave case}\\label{sec:qconcave}\n\n\\subsection{Immediate corollaries of Theorems~\\ref{thm:john_condition_general} and \\ref{thm:lowner_condition_general}}\n\nFix $s > 0$ and $q > 0$.\nIf a function $g \\colon\\Re^d\\to[0,+\\infty)$ is a proper $q$-concave function, then\n$\\lifting{g^q}$ is a compact convex set with non-empty interior in $\\Re^{d+1}$, which yields the following.\n\n\\begin{lem}\\label{lem:qpowerok}\nLet $g \\colon\\Re^d\\to[0,+\\infty)$ be a proper $q$-concave function containing the origin in the interior of the support. Then $\\supp{g^q}$ is a star-like subset of $g^q$ and \nthe set $\\contactset{g^q}{g^q}$ is bounded. 
\n\\end{lem} \nThus, we have the following results.\n\\begin{cor}\\label{cor:john_cond-q-concave}\nFix $s>0$, and let $f,g \\colon\\Re^d\\to[0,+\\infty)$ be two proper log-concave functions.\nAdditionally, assume $g$ is $q$-concave with some $q>0$ and contains the origin in the interior of its support.\n\\begin{enumerate}\n\t\t\\item\n\t\t\tIf $h=g$ is a local maximizer in John $s$-problem \\eqref{eq:john_problem_intro},\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\tilde{u}_1,\\tilde{v}_1)$, $\\dots$, \n\t\t\t$(\\tilde{u}_m,\\tilde{v}_m)$ $\\in \\contactset{f^q}{g^q}$\n\t\t\tand positive weights $\\tilde{c}_1,\\dots,\\tilde{c}_m$ satisfying\n\t\t\t\\begin{equation}\\label{eq:functional_glmp_concave}\n\t\\sum_{i=1}^{m} \\tilde{c}_i {u}_i \\otimes {v}^\\prime_i = \n\t\\mathrm{Id}_{d}, \\quad \n\t\\sum_{i=1}^{m} \\tilde{c}_i f^q(u_i){\\nu}^{\\prime}_i = \\frac{s}{q}\n\t\t\\quad\\text{and}\\quad\n\t\t\\sum_{i=1}^{m} \\tilde{c}_i {v}^\\prime_i=0,\n\\end{equation}\nwhere $\\tilde{u}_i=(u_i, f^q(u_i))$ and $\\tilde{v}_i=(v^\\prime_i,\\nu^\\prime_i)$.\n\t\t\\item\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp_concave},\n\t\t\tthen $g$ is a global maximizer in Positive position John $s$-problem \\eqref{eq:john_problem_pos}.\n\t\\end{enumerate}\n\\end{cor}\n\\begin{proof}\nWe consider the corresponding John $\\frac{s}{q}$-problem for functions $f^q$ and $g^q$, and by \\Href{Lemma}{lem:qpowerok} we may apply \\Href{Theorem}{thm:john_condition_general}.\n\\end{proof}\n\\begin{cor}\\label{cor:lowner_cond-q-concave}\nFix $s>0$, and let $f,g \\colon\\Re^d\\to[0,+\\infty)$ be two proper log-concave functions.\nAssume that $\\loglego{g}$ is $q$-concave with some $q>0$, and contains the origin in the interior of its support.\nThen the following hold.\n\t\\begin{enumerate}\n\t\t\\item\n\t\t\tIf $h=g$ is a local minimizer in L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro} for $f$ and $g$,\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\tilde{u}_1,\\tilde{v}_1), \\dots, \n\t\t\t(\\tilde{u}_m,\\tilde{v}_m) \\in \n\t\t\t\\contactset{\\loglego{\\parenth{g^q}}}{\\loglego{\\parenth{f^q}}}$\n\t\t\tand positive weights $\\tilde{c}_1,\\ldots,\\tilde{c}_m$ such that\n\t\t\\begin{equation}\\label{eq:functional-glmp-lowner_concave}\n\t\t\\sum_{i=1}^{m} \\tilde{c}_i {v}^\\prime_i \\otimes {u}^\\prime_i = \n\t\t \\mathrm{Id}_{d}, \\quad \n\t\t \\sum_{i=1}^{m} \\tilde{c}_i \\loglego{\\parenth{g^q}}(u_i^\\prime) \\cdot \\nu_i^\\prime = \\frac{s}{q}.\n\t\t\t\t\t\\quad\\text{and}\\quad\n\t\t\t\t\t\\sum_{i=1}^{m} \\tilde{c}_i\\loglego{\\parenth{g^q}}(u_i^\\prime) \\cdot \\nu_i^\\prime u_i^\\prime=0,\n \\end{equation} \n where $\\tilde{u}_i=(u_i^\\prime, \\loglego{\\parenth{g^q}}(u_i^\\prime))$ and $\\tilde{v}_i=(v_i^\\prime,\\nu_i^\\prime)$.\n\t\t\\item\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp-lowner},\n\t\t\tthen $h=g$ is a global maximizer in Positive position L\\\"owner $s$-problem \\eqref{eq:lowner_problem_pos} for $f$ and $g$.\n\t\\end{enumerate}\n\\end{cor}\n\\begin{proof}\nObserve that \n\\[\n\\loglego{\\parenth{g^{q}}}(y) = \\parenth{\\loglego{g}\\!\\parenth{\\frac{y}{q}}}^{q}\n\\]\nfor any $q > 0.$ Hence, $\\loglego{\\parenth{g^q}}$ is $1$-concave.\n We consider the corresponding L\\\"owner $\\frac{s}{q}$-problem for functions $f^q$ and $g^q$, and by \\Href{Lemma}{lem:qpowerok} we may apply \\Href{Theorem}{thm:lowner_condition_general}.\n\\end{proof}\n\n\nWe note that in these two 
corollaries, we choose the origin arbitrarily inside the interior of the support of corresponding functions.\nTo obtain the theorems formulated in the Introduction, we need a more subtle argument.\n\n\n\n\n\\subsection{Taking power and algebraic identities involving the contact pairs}\n\nIn this subsection, we discuss how replacing $f$ and $g$ by $f^q$ and $g^q$ changes the form of identities \\eqref{eq:functional_glmp} and \\eqref{eq:functional_glmp-lowner}.\n\n\\begin{lem}\\label{lem:equiv_john_cond_power}\nFix $s > 0$ and $ q \\in ( 0, 1]$. Let $f\\colon\\Re^d \\to [0,+\\infty)$ be a proper log-concave function,\nand let $g_b \\colon \\Re^d \\to [0, \\infty)$ be an upper semi-continuous function \nsuch that \n\\begin{itemize}\n\\item $\\logconc{g_b}$ satisfies our Basic Assumptions (see page~\\pageref{assumptions:basic}); \n\\item the set of contact points $\\contactpoint{f}{g_b}$ is a star-like set with respect to~$f$.\n\\end{itemize}\nSet $\\upthing{u}_i = (u_i, f(u_i))$ and $\\tilde{u}_i = (u_i, f^q(u_i)).$\nThen the following assertions are equivalent:\n\\begin{itemize}\n\\item There are contact pairs $(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactset{f}{g_b}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ that satisfy the identities in \n\t\t\t\\eqref{eq:functional_glmp}.\n\\item There are contact pairs $(\\tilde{u}_1,\\tilde{v}_1), \\dots, \n\t\t\t(\\tilde{u}_m, \\tilde{v}_m) \\in \\contactset{f^q}{g_b^q}$\n\t\t\tand positive weights $\\tilde{c}_1,\\ldots,\\tilde{c}_m$ that satisfy the identities in \n\t\t\t\\eqref{eq:functional_glmp_concave}.\n\\end{itemize} \n\\end{lem}\n\n\\begin{proof}\nFix $\\upthing{u}_i = (u_i, f(u_i)) \\in \\mathrm{ess}\\ \\mathrm{graph}\\ {f}$ and\n $\\upthing{v}_i = (v_i, \\nu_i) \\in \\nfcone{\\lifting{f}}{\\upthing{u}_i}$ with \n $\\iprod{\\upthing{u}_i}{\\upthing{v}_i} = 1.$\n \n \nWe consider two cases.\n\n\\emph{Case of a horizontal normal:} \nAssume that $\\nu_i =0$. \nBy Lemmas~\\ref{lem:nfcone_at_zero} and \\ref{lem:ncone_flat_normal}, the following assertion are equivalent.\n\\begin{itemize}\n\\item $\\parenth{\\upthing{u}_i, \\upthing{v}_i} \\in \\contactset{f}{g_b};$\n\\item $\\parenth{\\tilde{u}_i, \\tilde{v}_i} \\in \\contactset{f^q}{g_b^q},$ \nwhere $\\tilde{u}_i =(u_i, f^q(u_i))$ and $\\tilde{v}_i = \\upthing{v}_i = (v_i, 0)$.\n\\end{itemize} \nFinally, \n\\[ \\contactopjohn{\\upthing{u}_i}{\\upthing{v}_i} =\n\\parenth{u_i \\otimes v_i\\oplus f(u_i) \\nu_i, v_i } = \n \\parenth{u_i \\otimes v_i\\oplus 0, v_i }\n = \\contactopjohn{\\tilde{u}_i}{\\tilde{v}_i} \\]\nWe set $\\tilde{c}_i = c_i$ in this case.\n\n\\emph{Case of a non-horizontal normal:} \nAssume that $\\nu_i > 0$. By the definition of $\\contactset{f}{g_b}$, we have $f(u_i) > 0$. 
\n\nSince $\\contactpoint{f}{g_b}$ is a star-like set with respect to~${f}$ \nand by \\Href{Lemma}{lem:v_via_grad},\n\\[\n \\upthing{v}_i =\n \\frac{\\parenth{p_i, \\frac{1}{f(u_i)} }}{1 + \\iprod{p_i}{u_i}} \n\\]\nand $1 + \\iprod{p_i}{u_i} > 0 $ for some $p_i \\in \\partial (- \\ln f)(u_i)$.\nHence, $1 + q\\iprod{p_i}{u_i} > 0$ as well, since $q \\in (0,1]$. \nThus, \n\\Href{Lemma}{lem:v_via_grad} implies that the following assertions are equivalent:\n \\begin{itemize}\n \\item $\\parenth{\\upthing{u}_i, \\upthing{v}_i} \\in \\contactset{f}{g_b}$ and \n$\n \\upthing{v}_i = \n \\frac{\\parenth{p_i, \\frac{1}{f(u_i)} }}{1 + \\iprod{p_i}{u_i}}\n$\nfor some $p_i \\in \\partial(- \\ln f)(u_i);$\n\\item $\\parenth{\\tilde{u}_i, \\tilde{v}_i} \\in \\contactset{f^q}{g_b^q},$ \nwhere $\n \\tilde{v}_i = \n \\frac{\\parenth{q p_i, \\frac{1}{f^q(u_i)} }}{1 + q \\iprod{ p_i}{u_i}}\n$ for $p_i \\in \\partial(- \\ln f)(u_i)$\nand \n$\n\\tilde{u}_i = (u_i, f^q(u_i)).\n$\n \\end{itemize}\nBy substitution,\n\\[ \nc_i \\contactopjohn{\\upthing{u}_i}{\\upthing{v}_i} = \nc_i \\frac{\\parenth{({u}_i \\otimes {p}_i) \\oplus 1, p_i}}{1 + \\iprod{p_i}{u_i}} \n\\quad \\text{and} \\quad\n \\tilde{c}_i \\contactopjohn{\\tilde{u}_i}{\\tilde{v}_i} = \n c_i \\frac{\\parenth{({u}_i \\otimes {p}_i) \\oplus \\frac{1}{q}, p_i}}{1 + \\iprod{p_i}{u_i}} ,\n \\]\n where $\\tilde{c}_i = \\frac{c_i}{q} \\frac{1 + q\\iprod{p_i}{u_i}}{1 + \\iprod{p_i}{u_i}}$.\n\\end{proof}\n\n\n\n\n\\begin{lem}\\label{lem:equiv_lowner_cond_power}\nFix $s > 0$ and $ q \\in ( 0, 1]$. Let $f,g \\colon\\Re^d \\to [0,+\\infty)$ be two proper log-concave functions such that\n\\begin{itemize}\n\\item $\\loglego{g}$ satisfies our Basic Assumptions (see page~\\pageref{assumptions:basic}); \n\\item the set of contact points $\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$.\n\\end{itemize}\nSet $\\upthing{u}_i = (u_i, \\loglego{f}(u_i))$ and \n$\\tilde{u}_i = \\parenth{q{u_i}, \\loglego{(f^q)} \\!\\parenth{{qu_i}}}.$\nThen the following assertions are equivalent:\n\\begin{itemize}\n\\item There are contact pairs $(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactset{\\loglego{f}}{\\loglego{g}}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ that satisfy the identities in \n\t\t\t\\eqref{eq:functional_glmp-lowner}.\n\\item There are contact pairs $(\\tilde{u}_1,\\tilde{v}_1), \\dots, \n\t\t\t(\\tilde{u}_m, \\tilde{v}_m) \\in \\contactset{\\loglego{(f^q)}}{\\loglego{(g^q)}}$\n\t\t\tand positive weights $\\tilde{c}_1,\\ldots,\\tilde{c}_m$ that satisfy the identities in \n\t\t\t\\eqref{eq:functional-glmp-lowner_concave}.\n\\end{itemize} \n\\end{lem}\n\\begin{proof}\nObserve that \n\\[\n\\loglego{\\parenth{f^{q}}}(y) = \\parenth{\\loglego{f}\\!\\parenth{\\frac{y}{q}}}^{q}\n\\]\nfor any $q > 0.$\nThe rest of the proof is similar to that of the previous lemma. We omit the details.\n\nThe corresponding substitutions are as follows: \n$\\tilde{v}_i = \\parenth{\\frac{v_i}{q}, 0}$ and $\\tilde{c}_i = c_i$ in the case \n$\\nu_i= 0$, and \n$\\tilde{v}_i = \\frac{\\parenth{p_i, \\frac{1}{\\parenth{\\loglego{f}(u_i)}^q} }}{1 + q \\iprod{ p_i}{u_i}}\n$ for $p_i \\in \\partial(- \\ln \\loglego{f})(u_i)$ and $\n \\tilde{c}_i = \n \\frac{c_i}{q} \n \\frac{1 + q\\iprod{p_i}{u_i}}{1 + \\iprod{p_i}{u_i}}\n$ in the case\n $\\nu_i > 0$.\n\\end{proof}\n\n\n\n\n\n\n\n\n\n\n\\subsection{Taking power and the boundedness of contact pairs} \n\nFix $q \\in (0,1]$. 
If one considers $f^{q}$ and $g^q$ instead of $f$ and $g$,\nthen the corresponding liftings are ``more'' concave, which suggests that the corresponding problem is easier to solve. In some sense, this is indeed true. \n\\Href{Lemma}{lem:starlike_criteria} ensures that if $\\contactpoint{f}{g}$ is a star-like set with respect to~$f$,\nthen $\\contactpoint{f^q}{g^q}$ is a star-like set with respect to~$f^q$. The following result shows that taking a power of the functions does not destroy the boundedness of the set of contact pairs.\n\n\\begin{lem}[Replacing $g$ by $g^{\\upalpha}$ and boundedness]\\label{lem:power_of_bounded}\nLet $g \\colon \\Re^d \\to [0, +\\infty)$ be a proper log-concave function such that the set \n$\\contactset{g}{g}$ is bounded. Then for any $\\upalpha \\in (0,1],$ the function \n$g^{\\upalpha}$ is a proper log-concave function and the set \n$\\contactset{g^\\upalpha}{g^\\upalpha}$ is bounded.\n\\end{lem}\n\\begin{proof}\nWe only need to show that if for all $u\\in\\cl{\\supp g}$, we take all possible normals $\\upthing{v}=(v,\\nu)$ to $\\lifting{g^\\upalpha}$ at $\\upthing{u}=(u,g^\\upalpha(u))$, then we obtain a bounded set. We fix such a $u$.\nIn the case when $\\nu=0$, by \\Href{Lemma}{lem:ncone_flat_normal}, $\\lifting{g}$ and $\\lifting{g^\\upalpha}$ have the same set of horizontal normals at $u$. Thus, we may assume that $\\nu>0$, and hence $u\\in\\supp g$.\n\nUsing \\Href{Lemma}{lem:v_via_grad}, we see that \n $v = \\frac{p}{1 + \\iprod{p}{u}}$ for some non-zero $p \\in \\partial \\psi(u)$ and \n $1 + \\iprod{p}{u} > 0$, where $g=e^{-\\psi}$.\nUsing \\Href{Lemma}{lem:v_via_grad} again, we see that \n$\\upthing{v}_\\upalpha = (v, \\nu_\\upalpha)\\in \\nfcone{\\lifting{g^\\upalpha}}{(u, g^\\upalpha(u))}$ with some $\\nu_\\upalpha > 0$ if and only if\n\\[\n\\upthing{v}_\\upalpha = \n\\frac{\\parenth{ \\upalpha p, \\frac{1}{g^\\upalpha(u)} }}{1 + \\upalpha\\iprod{ p}{u}}\n\\] \nfor some $p \\in \\partial \\psi(u).$\nNote that $1 + \\upalpha \\iprod{p}{u} > \\upalpha (1 + \\iprod{p}{u}) > 0.$ \nSince $g$ is bounded, there is a constant $L>0$ such that\n\\[\n\\frac{1}{g^{\\upalpha}(u)} \\leq \\frac{L}{g(u)}\n\\]\nfor all $u \\in \\supp{g}.$ \nThus, \n\\[\n\\enorm{\\upthing{v}_\\upalpha} \\leq \\parenth{1 + \\frac{L}{\\upalpha}}\n\\enorm{\\frac{\\parenth{p, \\frac{1}{g(u)}}}{1 + \\iprod{p}{u}}}.\n\\] \nBy \\Href{Lemma}{lem:v_via_grad}, the latter vector is in $\\nfcone{\\lifting{g}}{(u, g(u))}$, and the lemma follows.\n\\end{proof} \n\nUsing \\Href{Lemma}{lem:power_of_bounded} and \n \\Href{Lemma}{lem:starlike_criteria}, we see that if the functions $f,g_b\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}, then the functions $f^\\upalpha,g_b^\\upalpha \\colon \\Re^d\\to[0,+\\infty)$ satisfy them as well for any $\\upalpha \\in (0,1].$\nThis means that the John conditions in the corresponding problems are fulfilled simultaneously, which is ensured by \\Href{Lemma}{lem:equiv_john_cond_power}.\n\nSimilarly, using \\Href{Lemma}{lem:power_of_bounded} and \n \\Href{Lemma}{lem:starlike_criteria}, we see that if the functions $f, g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}, then the functions $f^\\upalpha,g^\\upalpha \\colon \\Re^d\\to[0,+\\infty)$ with the same $c$ satisfy them as well for any $\\upalpha \\in (0,1].$\nThis means that the L\\\"owner conditions in the corresponding problems are fulfilled simultaneously, which is ensured by \\Href{Lemma}{lem:equiv_lowner_cond_power}.\n\n\n 
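To illustrate how taking a power removes flat zeros, we sketch an elementary computation for the function $g_1$ from \\Href{Section}{sec:boundedcontactpairs}; it uses only the definitions introduced above. Since $g_1$ is $q$-concave with $q = 1/4$, on its support we have\n\\[\ng_1^{1/4}(u) = \\parenth{1 - \\enorm{u}^2}^{1/2},\n\\]\nand hence $\\lifting{g_1^{1/4}}$ is the upper half of the Euclidean unit ball of $\\Re^{d+1}$, a compact convex set. For any unit vector $u \\in \\Re^d$, the normal cone of this half-ball at $(u,0)$ is\n\\[\n\\nfcone{\\lifting{g_1^{1/4}}}{(u,0)} = \\left\\{ \\lambda (u,0) - \\mu e_{d+1} \\colon \\lambda, \\mu \\geq 0 \\right\\},\n\\]\nwhich does not contain $e_{d+1}$. Thus, $g_1^{1/4}$ has no flat zeros, in contrast with $g_1$ itself, for which every unit vector is a flat zero.\n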
\n\n\\section{Proofs of the results presented in the Introduction}\\label{sec:introproofs}\n\n\n\\begin{proof}[Proof of \\Href{Theorem}{thm:john_intro}]\nThe proof mostly repeats the argument we used for \\Href{Theorem}{thm:lowner_intro} in \\Href{Section}{sec:radially_symmetric}.\nSince $f$ takes only positive values and the support of $g$ is bounded, we see that\n\\[\n\\inf\\limits_{x \\in\\ \\contactpoint{f}{g}\\ \\cap\\ \\supp g} g (x) > 0.\n\\]\nBy \\Href{Lemma}{lem:sufficient_condition_locstar_and_bounded}, we have that \n$\\contactpoint{f}{g}$ is a star-like set with respect to~$f$ and $\\contactset{f}{g}$ is bounded. \nThus, the assumptions on the functions in \\Href{Theorem}{thm:john_condition_general} are fulfilled. The necessity of condition \\eqref{eq:functional_glmp_intro} follows from this and \\Href{Lemma}{lem:v_via_grad}. \nBy the argument from \\Href{Section}{sec:radially_symmetric} for radially symmetric functions, the sufficiency of the corresponding conditions follows as well.\n\\end{proof}\n\n\n\\begin{proof}[Proof of \\Href{Theorem}{thm:john_intro-concave}]\nIf $q > 1,$ then $g$ is $1$-concave. Since $\\lifting{g}$ is a compact convex set in $\\Re^{d+1}$, the assumptions on the functions in \\Href{Theorem}{thm:john_condition_general} are fulfilled. The necessity of condition \\eqref{eq:functional_glmp_concave_intro} follows.\n\n\nAssume that $q \\in (0,1].$ Then,\nby \\Href{Lemma}{lem:sufficient_condition_locstar_and_bounded}, \n$\\contactpoint{f}{g}$ is a star-like set with respect to~$f$, \nand $\\contactset{f^q}{g^q}$ is bounded, since $\\lifting{g^q}$ is a compact convex set in $\\Re^{d+1}$. \nThus, there are contact pairs \n\t\t\t$(\\tilde{u}_1,\\tilde{v}_1)$, $\\dots$, \n\t\t\t$(\\tilde{u}_m,\\tilde{v}_m)$ $\\in \\contactset{f^q}{g^q}$\n\t\t\tand positive weights $\\tilde{c}_1,\\dots,\\tilde{c}_m$ satisfying\n\t\t\\eqref{eq:functional_glmp_concave}. \n\t\tThe necessity of condition \\eqref{eq:functional_glmp_concave_intro} follows from \\Href{Lemma}{lem:equiv_john_cond_power}.\n\nBy the argument from \\Href{Section}{sec:radially_symmetric} for radially symmetric functions, the sufficiency of the corresponding conditions follows as well.\n\\end{proof}\n\n\n\n\\begin{proof}[Proof of \\Href{Theorem}{thm:lowner_intro}]\nSince $\\loglego{f}$ takes only positive values and the support of $\\loglego{g}$ is bounded, we see that\n\\[\n\\inf\\limits_{x \\in\\ \\contactpoint{\\loglego{f}}{\\loglego{g}}\\ \\cap\\ \\supp \\loglego{g}} \\loglego{g} (x) > 0.\n\\]\nBy \\Href{Lemma}{lem:sufficient_condition_locstar_and_bounded}, we have that \n$\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$ and $\\contactset{\\loglego{f}}{\\loglego{g}}$ is bounded. \nThus, the assumptions on the functions in \\Href{Theorem}{thm:lowner_condition_general} are fulfilled. The necessity of condition \\eqref{eq:functional_glmp_lowner_intro} follows from this and \\Href{Lemma}{lem:v_via_grad}. \nBy the argument at the beginning of the present section, the sufficiency of the corresponding conditions follows as well.\n\\end{proof}\n\n\n\\begin{proof}[Proof of \\Href{Theorem}{thm:lowner_intro-concave}]\nIf $q > 1,$ then $\\loglego{g}$ is $1$-concave. Since $\\lifting{\\loglego{g}}$ is a compact convex set in $\\Re^{d+1}$, the assumptions on the functions in \\Href{Theorem}{thm:lowner_condition_general} are fulfilled. 
The necessity of condition \\eqref{eq:functional_glmp-lowner-conc_intro} follows.\n\n\nAssume now that $q \\in (0,1].$ Then,\nby \\Href{Lemma}{lem:sufficient_condition_locstar_and_bounded}, \n$\\contactpoint{\\loglego{f}}{\\loglego{g}}$ is a star-like set with respect to~$\\loglego{f}$, \nand $\\contactset{\\loglego{(f^q)}}{\\loglego{(g^q)}}$ is bounded by convexity. \nThus, there are contact pairs \n\t\t\t$(\\tilde{u}_1,\\tilde{v}_1)$, $\\dots$, \n\t\t\t$(\\tilde{u}_m,\\tilde{v}_m)$ $\\in \\contactset{\\loglego{(f^q)}}{\\loglego{(g^q)}}$\n\t\t\tand positive weights $\\tilde{c}_1,\\dots,\\tilde{c}_m$ satisfying\n\t\t\\eqref{eq:functional-glmp-lowner_concave}. \n\t\tThe necessity of condition \\eqref{eq:functional_glmp-lowner-conc_intro} follows from \\Href{Lemma}{lem:equiv_lowner_cond_power}.\n\nBy the argument from \\Href{Section}{sec:radially_symmetric} for radially symmetric functions, the sufficiency of the corresponding conditions follows as well.\n\\end{proof}\n\n\n\n\n\\section{Discussion}\\label{sec:corollaries_and_discussion}\n\n\\subsection{Convex sets as a special case}\n\nObserve that \\Href{Theorem}{thm:GLMP} immediately follows from\n\\Href{Theorem}{thm:john_condition_general} by setting $f = \\chi_{L}$ and $g_b \n= \\chi_{\\operatorname{ext}{K}}$ therein.\n\n\n\n\\subsection{Equivalence of the John and the L\\\"owner problems}\\label{sec:johnlownerequivalence}\nLet $f,g\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}. \nAdditionally, let $f$ satisfy the Basic Assumptions (see page~\\pageref{assumptions:basic}), let $\\contactpoint{f}{g}$ be a star-like set with respect to~$g$, and let $\\contactset{f}{g}$ be bounded.\nClearly, if $g$ is a local minimizer in the L\\\"owner $s$-problem \\eqref{eq:lowner_problem_intro}, then $f$ is a local maximizer in the John $s$-problem \\eqref{eq:john_problem_intro} for $g$ and $f$. Hence, both \\Href{Theorem}{thm:lowner_condition_general} and \\Href{Theorem}{thm:john_condition_general} are applicable in this case. We show that the corresponding versions of \n\\eqref{eq:john_problem_intro} and \\eqref{eq:lowner_problem_intro} are equivalent in this case.\n\nSince $\\supp{\\loglego{g}}$ is bounded, we have that $\\supp g = \\supp \\loglego{f} = {\\mathbb R}^d$. Therefore, there are no ``flat'' contact points, either for the pair $f$ and $g$ or for $\\loglego{f}$ and $\\loglego{g}$. More precisely,\nif $\\parenth{\\upthing{u}, \\upthing{v}}$ with $\\upthing{u} = (u, f(u))$ and $\\upthing{v} = (v, \\nu)$ belongs to $\\contactset{f}{g}$ (resp. $\\contactset{\\loglego{g}}{\\loglego{f}}$)\nthen $\\nu > 0$ and $f(u) > 0$ (resp. 
$\\loglego{f}(u) > 0$).\nIn this case, \\Href{Lemma}{lem:v_via_grad} yields \n \\[\n \\contactopjohn{\\upthing{u}}{\\upthing{v}} = \n\\frac{\\parenth{({u} \\otimes {p}) \\oplus 1, p}}{1 + \\iprod{p}{u}} \n \\]\nfor some $p \\in \\partial{\\parenth{- \\ln f}}(u) \\subset \\partial{\\parenth{- \\ln g}}(u).$ \nBy the properties of the subdifferential listed in \\Href{Lemma}{lem:subdif_basics}, \n\\[\n u \\in \\partial{\\parenth{- \\ln \\loglego{g}}}(p) \\subset \\partial{\\parenth{- \\ln \\loglego{f}}}(p)\\] \n and\n$ \\loglego{g}(p) = \\loglego{f}(p) = \\frac{e^{- \\iprod{p}{u}}}{f(u)}.$ \nHence, the pair $\\parenth{\\upthing{u}, \\upthing{v}}$ belongs to $\\contactset{f}{g}$ \nif and only if the pair \n$\\parenth{\\tilde{u}, \\tilde{v}}$ belongs to $\\contactset{\\loglego{g}}{\\loglego{f}},$\nwhere \n$\\tilde{u} = (p, \\loglego{g}(p))$ and \n$\\tilde{v} = \\frac{\\parenth{u, \\frac{1}{\\loglego{g}(p)} }}{1 + \\iprod{p}{u}}.$\nBy direct calculations,\n \\[\n \\contactopjohn{\\upthing{u}}{\\upthing{v}} = \n \\contactoplowner{\\tilde{u}}{\\tilde{v}}. \n \\]\nThe desired equivalence follows.\n\n\n\n\\subsection{The fixed center John and L\\\"owner problems -- no translation}\nOur extended contact operator $(A \\oplus \\upalpha, a)\\in\\mathcal{M}$ consists of two parts: \nthe operator part $A \\oplus \\upalpha$ and the translation part $a$.\nIn essence, only the rightmost equation in \\eqref{eq:john_problem_intro} (resp. \\eqref{eq:lowner_problem_intro}) is responsible for the translation (shifting) of the function. If $A$ is a non-singular matrix of order $d$ and $\\upalpha > 0$,\nthen $\\lifting{\\upalpha g(Ax)}$ is the linear image of $\\lifting{g}$.\n\n\nWe will say that we consider the John or the L\\\"owner $s$-problem (resp., \nPositive John/L\\\"owner $s$-problem) \n\\emph{with fixed center} if we maximize or minimize over $\\funposfc{g}$ (resp., $\\funpposfc{g}$), where\n$\\funposfc{g}=\\{\\upalpha g(Ax+a)\\colon (A\\oplus\\upalpha, a)\\in\\mathcal{M}_{f.c.}\\}$\nand\n$\\funpposfc{g}=\\{\\upalpha g(Ax+a)\\colon (A\\oplus\\upalpha, a)\\in\\mathcal{M}_{f.c.}^+\\}$,\nwhere $\\mathcal{M}_{f.c.} = \\left\\{(A \\oplus \\upalpha, 0) \\colon (A \\oplus \\upalpha, 0) \\in \\mathcal{M} \\right\\}$ and \n$\\mathcal{M}_{f.c.}^{+} = \\left\\{(A \\oplus \\upalpha, 0) \\colon (A \\oplus \\upalpha, 0) \\in \\mathcal{M}^{+} \\right\\}$.\n\n\\begin{thm}[Fixed center John condition]\\label{thm:john-condition-fixed_center}\nLet the functions $f,g_b\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:john_condition_general}.\nSet $g =\\logconc{g_b}$. Then the following hold.\n\t\\begin{enumerate}\n\t\t\\item\n\t\t\tIf $h=g$ is a local maximizer in the John $s$-problem with fixed center,\n\t\t\t\tthen there exist contact pairs \n\t\t\t$(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactset{f}{g_b}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ such that\n\t\t\\begin{equation}\\label{eq:functional_glmp_fixed_center}\n\t\t\\sum_{i=1}^{m} c_i {u}_i \\otimes {v}_i = \n\t\t \\mathrm{Id}_{d} \t\t\t\t\n\t\t \t\\quad\\text{and}\\quad \n\t\t \\sum_{i=1}^{m} c_i f(u_i)\\nu_i = s\n\t\t\t\t\\end{equation}\n where $\\upthing{u}_i=(u_i, f(u_i))$ and $\\upthing{v}_i=(v_i,\\nu_i)$.\n\t\t\\item\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp_fixed_center},\n\t\t\tthen $h=g$ is a global maximizer in the Positive John $s$-problem with fixed center. 
\n\t\\end{enumerate}\n\\end{thm}\n\\begin{proof}[Sketch of the proof]\nIn \\Href{Lemma}{lem:separation_John_problem}, \nwe consider the linear span of $\\mathcal{M}_{f.c.},$ which is a subspace of $\\mathcal{W}.$\nBy a standard separation argument, \n there are no contact pairs of $f$ and $g_b$ and positive weights satisfying equation \\eqref{eq:functional_glmp_fixed_center} if and only if a linear hyperplane \n with normal of the form $(H \\oplus \\gamma, 0)$ strongly separates the set of contact operators and $(\\mathrm{Id} \\oplus s, 0).$ \n The rest of the proof coincides with that of \\Href{Theorem}{thm:john_condition_general}.\n\\end{proof}\n\nSimilarly, we have the following.\n\n\\begin{thm}[Fixed center L\\\"owner condition]\\label{thm:lowner_condition_fixed_center}\nLet the functions $f,g_b\\colon\\Re^d\\to[0,+\\infty)$ satisfy the assumptions of \\Href{Theorem}{thm:lowner_condition_general}.\nThen the following hold.\n\t\\begin{enumerate}\n\t\t\\item\\label{item:lowner_fixed_center}\n\t\t\tIf $h=g$ is a local minimizer in the L\\\"owner $s$-problem with fixed center,\n\t\t\tthen there exist contact pairs \n\t\t\t$(\\upthing{u}_1,\\upthing{v}_1), \\dots, \n\t\t\t(\\upthing{u}_m,\\upthing{v}_m) \\in \\contactset{\\loglego{g}}{\\loglego{f}}$\n\t\t\tand positive weights $c_1,\\ldots,c_m$ such that\n\t\t\\begin{equation}\\label{eq:functional_glmp_lowner_fixed_c}\n\t\t\\sum_{i=1}^{m} c_i {v}_i \\otimes {u}_i = \n\t\t \\mathrm{Id}_{d}, \\quad \n\t\t \\sum_{i=1}^{m} c_i \\loglego{g}(u_i) \\cdot \\nu_i = s,\n \\end{equation} \n where $\\upthing{u}_i=(u_i, \\loglego{g}(u_i))$ and $\\upthing{v}_i=(v_i,\\nu_i)$.\n\t\t\\item\n\t\t\tIf there exist contact pairs and positive weights satisfying equation \\eqref{eq:functional_glmp_lowner_fixed_c},\n\t\t\tthen $h=g$ is a global maximizer in the Positive position L\\\"owner $s$-problem with fixed center.\n\t\\end{enumerate}\n\\end{thm}\n\nWe note that in the case of the fixed center problems, the corresponding conditions, \nthat is, equations \\eqref{eq:functional_glmp_fixed_center} and \n\\eqref{eq:functional_glmp_lowner_fixed_c}, coincide whenever both theorems are \napplicable.\n\n\n\n\n\\subsection*{Acknowledgement} We thank Alexander Litvak for the many discussions on Theorem~\\ref{thm:GLMP}. Igor Tsiutsiurupa participated in the early stage of this project.\nTo our deep regret, Igor chose another road for his life and stopped working with us.\n\n\n\n\\bibliographystyle{amsalpha} \n\n\n\\section{Introduction}\\label{sec:intro}\nFuture 6G networks are envisioned as an unprecedented evolution from connected things to connected intelligence, thereby serving as the backbone of a cyber-physical world with the integration of connected devices, intelligence, and humans \\cite{surveyEdgeShi}. Numerous 6G \\gls{ai} applications have emerged recently to improve efficiency and system performance in many vertical sectors, such as industrial automation \\cite{dAIFaultDetectFactory}, autonomous driving \\cite{li2019federated}, and enhanced mobile broadband \\cite{ganjCascaded}. The promising performance gains of \\gls{ai} models come with a significant training workload that relies on the massive amounts of data collected by edge devices. 
Centralized training of the models can be impractical in many wireless communication applications because of i) the distributed nature of the data generated/collected by mobile devices, ii) privacy concerns of sharing the local data with a central server, especially when the computational server is managed by a third party operator, and iii) limited wireless resources (in terms of bandwidth and power). Therefore, privacy-preserving distributed \\gls{ai} techniques have become the cornerstone of recent advancements in \\gls{ai} applications over wireless networks. In most distributed training algorithms, a set of \ndevices upload their local updates (e.g., local gradients in \\gls{dgd} \\cite{duttaKsync}, or local model updates in \\gls{fl} \\cite{fl}) via an \\gls{ul} channel to a central node (or a set of nodes) that maintains global parameters and orchestrates the iterations of the distributed training algorithm. Once the central node updates the global model, it shares it with the devices over a \\gls{dl} channel for the next iteration. \n\nTo highlight the complex interplay between the \\gls{ai} workflow and the underlying communication services, we note that the performance of distributed learning algorithms is affected by the errors and random delays in \\gls{ul}/\\gls{dl} transmissions as well as the AI parameters (e.g., model size, data quality, and training algorithm)~\\cite{saadConvergOpt,chen2021Survey,ganjInterplay}. More specifically, the wall-clock convergence time of distributed training algorithms depend on i) the time delay of every iteration (e.g., the amount of time in which global model parameters are transmitted to the devices, trained locally, and transmitted back to the central node), and ii) the number of iterations. The former is not only a function of the model and data sizes, but also the quality of the wireless channel between the central node and individual computational devices. The general perception is that increasing the \\gls{ai} model size improves the training accuracy \\cite{mobileNets}, given enough data samples and a proper training approach that reduces over-fitting. However, using a larger \\gls{ai} model also means longer communication and computation time, resulting in a potentially higher convergence time \\cite{saadConvergOpt, khaledJsacFLConvergWireless}. Higher AI communication overheads may also be detrimental for other communication services running in parallel to the AI. The tighter the requirements of the underlying service, the harder to design smooth coexistence.\n\n\\Gls{urllc} is characterized by strict requirements in terms of latency, which could be as short as $500$\\,$\\mu$s, and availability, which could be as high as $99.9999999$ \\cite{3GPP22104}. In \\cite{3GPP22104,3GPP22261}, \\gls{3gpp} defines \\textit{communication service availability} as the mean proportion of time during which the communication service meets the required \\gls{qos} of the application it serves. 
Regarded as the most challenging use case in 5G and beyond 5G, this type of service is supposed to enable challenging applications (e.g., factory automation or autonomous intelligent transport systems \\cite{urllcApp}) that have not been feasible in preceding generations of wireless communication systems.\n\\subsection{Uniqueness of Coexistence of \\gls{urllc} and Distributed \\gls{ai}}\nThere exist a rich literature when it comes to conventional mixed services between \\gls{urllc} and \\gls{embb} (e.g., \\cite{urllcEmbbTransCoex,anand2020jointUrllcEmbb}), \\gls{urllc} and \\gls{mmtc} (e.g., \\cite{uavUrllcMmtc}), or even all three together (e.g., \\cite{popovski20185gUrllcEmbbMmtc}). Notice that there are two main fundamental differences between distributed \\gls{ai} services and other traditional communication traffic. First, the performance of these conventional services is characterized by the statistics of a communication metric (e.g., throughput or energy consumption for \\gls{embb} and \\gls{mmtc}, respectively). Nevertheless, distributed training service is an iterative and collaborative task aiming to solve an optimization problem as quickly as possible. Hence, the performance may not be affected only by the transmission characteristics of a single device. The potential statistical correlation of data of various devices allows distributed learning to operate using a carefully chosen subset of devices. Moreover, higher quality updates from few devices at each iteration may be more beneficial than lower quality updates from many devices \\cite{denizUpdateAware}, showing that sum throughput is not the right metric due to the dependence among data across different devices. Second, in addition to well-known decision parameters for other communication services, distributed AI has a unique set of decision variables such as the model size, choice of algorithm, and selection of devices participating in the training task.\nHowever, to the best of our knowledge, no literature exists on the coexistence of distributed \\gls{ai} and \\gls{urllc} services in which both communication and learning aspects are jointly considered.\nDespite the lack of such analysis, numerous 6G \\gls{ai} applications have emerged recently to improve efficiency and system performance in various cyber-physical control applications such as industrial automation \\cite{dAIFaultDetectFactory}, and autonomous driving \\cite{vehicularFlSaad}. We believe the adoption and success of such applications depend highly on the analysis and optimization of these scenarios with existing \\gls{urllc} services, which serves as the main motivation of our work.\n\n\\subsection{Distributed \\gls{ai} over Wireless Networks}\nThe main challenges of running distributed training over wireless networks arise from its two main characteristics: i) the dynamics of wireless propagation, which depends on various factors in the network, such as noise, interference, and fading, and ii) resource (e.g., bandwidth and transmit power) scarcity \\cite{chen2021Survey}. The latter becomes even more significant since distributed training requires many iterations exchanging typically large models or model parameters \\cite{chen2021Survey}. From the communication service perspective, several recent studies have focused on resource management and device selection techniques. 
References \\cite{jsacPoorLowLatencyFL,optFlIIoT,chenJointLearningCommFL,dinhFEDL,denizUpdateAware} leverage resource management and, more explicitly, device selection to improve the performance of distributed learning in terms of training loss or convergence time. For example, in \\cite{chenJointLearningCommFL}, the authors evaluate the effects of resource management, device selection, and transmit power on \\gls{fl} convergence and optimize these wireless parameters to reduce the \\gls{fl} training loss. Reference \\cite{dinhFEDL} proposes an \\gls{fl} algorithm that can handle heterogeneous user data assuming strongly convex and smooth loss functions. The resource allocation is then optimized to improve the convergence of the proposed algorithm. In \\cite{denizUpdateAware}, it is shown that the performance of distributed learning can be improved if the device selection algorithm jointly considers the updates from the devices together with their channel conditions. Nevertheless, none of these works address the mixed service scenario where the distributed learning performance is determined not only by the wireless network characteristics and limitations but also by the demands of the higher priority service.\n\n\\subsection{Contributions}\nAs distinct services, the performance of both \\gls{urllc} and distributed \\gls{ai} over wireless networks has been widely investigated in the existing literature. However, the coexistence of \\gls{urllc}, with its stringent requirements, and the distributed \\gls{ai} workflow, with its unique traffic model and performance characteristics, has not yet been discussed in the literature. Such coexistence introduces new fundamental challenges as well as unique trade-offs between \\gls{urllc} latency and availability on the one hand, and convergence time and accuracy of distributed \\gls{ai} on the other hand. When distributed \\gls{ai} training is run over a set of computing clusters, under ideal processing and communication assumptions, increasing the model size and/or the number of participating \\gls{ai} devices at each iteration will often lead to better convergence. 
\nHowever, when distributed learning is run over a set of wireless devices, because of interference and limited available bandwidth, various parameters (e.g., \\gls{sinr}, \\gls{per}, and queuing delays) might impact the training delay to the extent that the improvement in convergence rate may become nonessential.\nWhen \\gls{urllc} comes into the picture, the performance becomes bounded by two more factors, i.e., the competition with higher priority traffic and the \\gls{urllc} availability requirements. The latter is use-case specific and, depending on the strictness of the requirements and the sensitivity of the use case to these requirements, increasing the model size and/or the number of participating \\gls{ai} devices can have a non-linear detrimental effect on the distributed \\gls{ai} performance.\n\nIn this paper, we focus on understanding and optimizing the coexistence of distributed learning and \\gls{urllc} services. We introduce a soft synchronous distributed learning protocol in which the central node broadcasts the global model updates upon receiving local updates from a subset of the available devices. Then, leveraging \\gls{drl}, we develop a framework to dynamically select a set of participating \\gls{ai} devices in order to minimize the convergence time of the distributed learning task while maintaining the strict \\gls{urllc} availability requirements. \n\\textit{To the best of our knowledge, this is the first work that comprehensively studies and optimizes the underlying trade-offs between the \\gls{urllc} performance and the distributed \\gls{ai} workflow, simultaneously running on the same wireless network.} In summary, our contributions are as follows:\n\n\\begin{itemize}\n \\item We develop a model for the operational metrics of the \\gls{urllc} service (i.e., communication service availability) and the essential parameters that characterize the \\gls{ai} training workflow (i.e., training delay, model size, convergence, and accuracy), and investigate the interplay between them. Since the system is resource-limited (in terms of bandwidth and transmission power) and the \\gls{urllc} availability requirements are strict, a subset of devices must be selected to perform each iteration of distributed \\gls{ai}. Accordingly, we formulate an optimization problem that minimizes the average training latency of the distributed \\gls{ai} to reach $\\epsilon$-accuracy while sustaining \\gls{urllc}'s communication service availability requirements.\n \n \n \\item We transform the formulated coexistence optimization problem into a \\gls{mdp} and design an action masking technique to enforce a lower bound on the number of \\gls{ai} devices required to participate in each iteration of distributed training. However, the solution may select a higher number of devices than this minimum to address the so-called \\textit{straggling effect}.\n \n \\item To deal with the unknown dynamics of our complex cellular system, we propose a data-driven approach that optimizes the device selection policy via a state-of-the-art off-policy \\gls{drl} algorithm, namely \\gls{sac}. In our scheme, the device selection policy of each \\gls{ai} device is distributed across distinct neurons, resulting in a linear increase in the \\gls{sac} output size with the number of devices.\n \n \\item We evaluate our framework utilizing a \\gls{3gpp}-compliant 5G simulator in a factory automation use case. We observe that the number of participating \\gls{ai} devices can significantly impact the performance of \\gls{urllc}. 
Our results provide important insights for the ongoing standardization activities of distributed \\gls{ai}.\n\\end{itemize}\n\nThe rest of this paper is organized as follows. We provide the necessary preliminaries on distributed learning and our system model in Section~\\ref{sec:Background} and Section~\\ref{sec:systemModel}, respectively. We then formulate the problem in Section~\\ref{sec:PM}. Section~\\ref{sec:solution} presents the proposed \\gls{mdp} modeling and \\gls{drl}-based device selection.\nWe describe our simulation methodology in Section~\\ref{sec:simulation}, and discuss the results in Section~\\ref{sec:preformance}. Finally, Section~\\ref{sec:conculsions} concludes the paper.\n\n\n\\textit{Notations:} \nNormal font $x$ or $X$, bold lowercase font $\\bm{x}$, bold uppercase font $\\bm{X}$, and uppercase calligraphic font $\\mathcal{X}$ denote scalars, vectors, matrices, and sets, respectively.\nWe denote by $[X]$ the set $\\{1,2,\\ldots,X\\}$, by $\\left[\\bm{x}\\right]_{i}$ the element $i$ of vector $\\bm{x}$, by $\\left[\\bm{X}\\right]_{i,j}$ the element $ij$ of matrix $\\bm{X}$, and by $|\\mathcal{X}|$ the cardinality of set $\\mathcal{X}$. We define $\\mathds{1}\\{\\bm{x}\\}$ as the element-wise indicator function returning $\\bm{y}$, where $[\\bm{y}]_i$ takes 1 when condition $[\\bm{x}]_i$ holds, and 0 otherwise. The curled inequality ($\\succeq$ or $\\succ$) represents element-wise inequality. We use $\\bm{1}$ and $\\bm{0}$ to denote all-one and all-zero vectors, respectively.\n\\section{Background on Distributed Learning}\\label{sec:Background}\nConsider the problem of minimizing a sum of smooth functions $\\{f_{i}: \\mathbb{R}^d \\mapsto \\mathbb{R}\\}_{i \\in [N]}$, with corresponding gradients $\\{\\nabla f_{i}: \\mathbb{R}^d \\mapsto \\mathbb{R}^d\\}_{i \\in [N]}$:\n\\begin{equation}\\label{eq: MainOptimProblem}\n\\mbox {\\boldmath $w$}^\\star {\\coloneqq} \\arg\\min_{\\mbox {\\boldmath $w$}\\in\\mathbb{R}^d} f(\\mbox {\\boldmath $w$}) {=} \\arg\\min_{\\mbox {\\boldmath $w$}\\in\\mathbb{R}^d} \\frac{1}{N}\\sum\\nolimits_{i \\in [N]}f_i(\\mbox {\\boldmath $w$}).\n\\end{equation}\nSuch problems frequently arise in distributed learning, where $N$ is the number of distributed devices, $f$ could express the global loss function, and each $f_i$ could represent a local loss function. In practice, to parallelize the computations or to preserve the privacy of local datasets, we use distributed algorithms to solve~\\eqref{eq: MainOptimProblem}~\\cite{Bottou2018SIAM}. That is, at iteration $k$, a subset of the workers compute and upload their local gradients $\\{\\nabla f_i(\\mbox {\\boldmath $w$}_k)\\}_i$ to a central node, which updates the model and broadcasts the updated model parameters $\\mbox {\\boldmath $w$}_{k+1}$ back to the workers.\\footnote{We have a similar set of trade-offs and solutions for non-smooth functions, where we cannot define gradients. The major difference compared to this paper is that, instead of updating based on gradients, we may need to update based on their generalizations, such as subgradients~\\cite{scaman2018optimal}.} \\gls{fl} is another popular method in which the workers run one or several local training passes before uploading their local models. The central node then takes a global average over them. The communication overhead is almost the same as uploading gradients \\cite{li2019federated}. 
However, most of these \\gls{ul} messages (gradients or local models) may be redundant, carrying almost no additional information, since they can often be retrieved from previously communicated messages as well as from the messages of other devices \\cite{ghadikolaei2021lena}. Forcing some of them to remain silent would i) reduce \\gls{ul} interference to other users, ii) increase throughput, and iii) improve latency. \n\n\nIn conventional synchronous distributed training methods, the central node waits until it receives the local updates from all participating devices, leading to a considerable inactive time at the central node as well as faster devices waiting for stragglers. To tackle the straggling problem, in $n$-sync approaches, the central node only waits for a subset of participating devices, say $n$ out of all $N$ devices, and updates the global model using their messages at every iteration \\cite{duttaKsync}. Nevertheless, vanilla $n$-sync-based methods add extra load on the underlying communication system, as they ask all the devices to upload their updates while the central node starts its update once the first $n$ of them are received. Reference \\cite{ji2020dynamic} proposed an algorithm to adjust $n$ at every iteration. References~\\cite{fl,ghadikolaei2021lena,chen2018lag} proposed various approaches to eliminate some unnecessary uploads. However, none of those works study or optimize the interplay between distributed learning and other parallel communication services.\n\n\n\\section{System Model}\\label{sec:systemModel}\n\\subsection{Network Model}\\label{sec:networkModel}\nWe consider an industrial automation scenario, where a set of $\\mathcal{G}{\\coloneqq}[G]$ \\glspl{gnb}, each consisting of $1$ cell, serve a set of $\\mathcal{U}{\\coloneqq}[U]$ industrial devices that execute different functions in the factory hall to enable automated production. The communication system should deliver i) monitoring data to the \\glspl{gnb} and ii) computed or emergency control commands to the actuators in a timely and reliable manner.\n\nFor simplicity, we assume that the \\gls{ai} devices are distinct from the industrial devices, and that there exists a set of $\\mathcal{N} {\\coloneqq} [N]$ \\gls{ai} devices. Moreover, we assume that the \\gls{ai} central node needs to receive the relevant local information from $n$ out of these $N$ \\gls{ai} devices to update its global model at each iteration. To tackle the straggler effect, the \\gls{ai} central node might request an update from a subset $\\mathcal{N}_{m,k} \\subseteq \\mathcal{N}$ of $|\\mathcal{N}_{m,k}| {=} m_k({\\geq} n)$ devices at iteration $k$. Hence, at the $k$th iteration, the central node might request $m_k{-}n$ extra backup devices to mitigate the straggler problem in a synchronous distributed learning scenario.\n\nTo manage the coexistence of the two services, \nwhose priorities are inherently different, 5G and beyond 5G envision two approaches. The first approach, employed in this paper, is to use the existing standardized protocols in 5G-NR for \\gls{qos} handling. In this case, each connected device is assigned one or several \\gls{qos} flows and data radio bearers, where the former is set in the core network, depending on the service \\gls{qos} requirements. For example, in our scenario, the traffic from/to \\gls{urllc} devices is set to have a high-priority \\gls{qos} flow to ensure low latency, whilst the traffic from/to \\gls{ai} devices is set to have a low-priority \\gls{qos} flow. 
Each (or several) of these \\gls{qos} flows are then mapped to a data radio bearer in the \\gls{ran}. In \\gls{gnb} and devices, there is an associated \\gls{rlc} buffer to each data radio bearer, and in our case, with strict priority scheduling \\cite{dahlman5GNr}. The second approach is to have separate slices for \\gls{urllc} and distributed \\gls{ai}, resulting in full resource separation (e.g., in terms of bandwidth).\n\n\n\\subsection{Distributed Learning Process}\\label{sec:distAlg}\nWe consider a network of $N$ \\gls{ai} devices that cooperatively solve a distributed learning problem. Assuming that $\\mathcal{N}_{n,k} \\subseteq \\mathcal{N}_{m,k}$ is the subset of size $n$ whose updates the central node receives first at iteration $k$, then iteration $k$ of an abstract distributed algorithm reads:\n\\vspace{-2mm}\n\\begin{subequations}\\label{eq:dAI}\n\\begin{align}\n &\\mbox {\\boldmath $w$}_{k+1}{=}A{\\left(\\mbox {\\boldmath $c$}_{i,k}, \\mbox {\\boldmath $w$}_k\\right)},\\quad \\mbox{for} \\quad \\forall i \\in \\mathcal{N}_{n,k} \\label{eq:globalUp}\\\\\n & \\mbox {\\boldmath $c$}_{i,k}{=}C_i{\\left(\\mbox {\\boldmath $w$}_{k}\\right)},\\quad \\mbox{for} \\quad \\forall i \\in \\mathcal{N}_{m,k} \\label{eq:localUp}\n\\end{align}\n\\end{subequations}\nwhere function $A$ represents an algorithm update of the decision variable $\\mbox {\\boldmath $w$}_k$, function $C_i$ picks out the relevant information, $\\mbox {\\boldmath $c$}_{i,k}$, that node $i$ uploads to the server to run the algorithm. This general algorithmic framework covers many \\gls{ml} algorithms, including \\gls{fl} and \\gls{dgd}, with or without data compression. For example, when $C_i$ returns a stochastic gradient, say $\\widehat{\\nabla} f_i(\\mbox {\\boldmath $w$}_{k})$, and $A=\\mbox {\\boldmath $w$}_k - \\eta \\sum_i \\widehat{\\nabla} f_i(\\mbox {\\boldmath $w$}_{k})/n$ for some positive step size $\\eta$, we recover $n$-sync and synchronous \\gls{dgd} for $n({<} N)$ and $n({=} N)$ \\cite{duttaKsync}, respectively. When $C_i$ returns an updated local model parameters of \\gls{ai} device $i$ and $A$ takes an averaging step over a subset of $n ({\\leq} N)$ \\gls{ai} devices, we recover FL ($n$-sync or synchronous). Without loss of generality, and for the sake of simplicity, we assume that the gradients' noise are \\gls{iid} \\cite{gradientNoise}.\n\n\n\\subsection{Channel Model}\nTo model the channel, we consider a \\gls{mimo} system in which we leverage the time varying 3D spatial channel model from \\gls{3gpp} in \\cite{3GPP38901}. In this model, channels are characterized via clustering the multipath components, arriving at antenna arrays, in delay and double-directional angle (i.e., the zenith and azimuth of the \\glspl{aoa} at the receiver and \\glspl{aod} at the transmitter). 
For simplicity, let us assume that $N_\\mathrm{g}$ and $N_\\mathrm{d}$ are the numbers of antenna elements at the \\gls{gnb} and at the devices, respectively.\nWe denote by $\\bm{H}_{x,y}(\\tau;t) \\in \\mathds{C}^{N_\\mathrm{d}\\times N_\\mathrm{g}}$ the baseband channel response at time $t$ to an input impulse at time $t-\\tau$, between the $x$th device and the $y$th \\gls{gnb} in \\gls{dl}.\nThen, an entry of $\\bm{H}_{x,y}(\\tau;t)$ for the $p$th receiving antenna element and the $q$th transmitting antenna element can be computed as\n\\begin{multline}\\label{eq:channel}\n\\left[\\bm{H}_{x,y}{\\left(\\tau;t\\right)}\\right]_{p,q} {\\coloneqq} \\sum_{l=1}^{N_\\mathrm{c}} \\sqrt{\\beta_{l}^{x,y}} \\sum_{s=1}^{N_\\mathrm{s}} {\\left(\\bm{g}_{p}^{x,y}{\\left(t,l,s\\right)}\\right)}^\\intercal \\bm{F}_{\\mathrm{xp}}^{x,y}{\\left(t,l,s\\right)}\\\\ \\bm{g}_{q}^{x,y}{\\left(t,l,s\\right)}e^{j\\Upsilon_{p,q}^{x,y}{\\left(t,l,s\\right)}}\\delta{\\left(\\tau-\\tau_{\\mathrm{p},l,s}\\right)},\n\\end{multline}\nwhere $N_{\\mathrm{c}}$ and $N_{\\mathrm{s}}$ are the numbers of clusters and rays, respectively, and $\\beta_{l}^{x,y}$ is a function of the path loss, the shadowing, and the normalized power of the $l$th cluster. Besides, ${\\bm{g}_{p}^{x,y}\\!{\\left(\\cdot\\right)}}$ is the field pattern of the $p$th receiving element for the $s$th ray of the $l$th cluster in the direction defined by the arriving zenith and azimuth angles, $\\bm{F}_{\\mathrm{xp}}^{x,y}\\!{\\left(\\cdot\\right)}$ is a $2{\\times}2$ matrix modeling the cross-polarization power ratio for the $s$th ray of the $l$th cluster, $\\bm{g}_{q}^{x,y}\\!{\\left(\\cdot\\right)}$ is the field pattern of the $q$th transmitting element for the $s$th ray of the $l$th cluster in the direction defined by the departing zenith and azimuth angles, $\\Upsilon_{p,q}^{x,y}{\\left(\\cdot\\right)}$ is a function of the location vectors of the $p$th receiving and $q$th transmitting elements as well as the Doppler frequency, and finally, $\\tau_{\\mathrm{p},l,s}$ is the propagation delay of the $s$th ray in the $l$th cluster. For \\gls{ul}, $\\bm{H}_{x,y}{\\left(\\tau;t\\right)}$ can be derived by swapping $p$ and $q$ in \\eqref{eq:channel}.\n\nNote that although we leverage the \\gls{3gpp} statistical spatial channel model~\\cite{3GPP38901}, our problem formulation (Section~\\ref{sec:problemFor}) and solution approach (Section~\\ref{sec:solution}) are general and not limited to this channel model. In the next section, we use these models to formulate our performance metrics.\n\\section{Performance Metrics and Problem Formulation}\\label{sec:PM}\n\\subsection{\\gls{urllc} Metric: Communication Service Availability}\\label{sec:urllcKpi}\nThe players in the operational, information, and communication technology domains are entering new territory in which 5G is utilized to connect industries. The main challenge in such a merger is ensuring that the operational requirements are fulfilled during a 5G system’s operating phase \\cite{5gAcia}.\nOne well-accepted metric in the operational technology domain is availability. Hence, \\gls{3gpp}, as the primary standardization consortium for mobile telecommunications, has attempted to specify the requirements for communication service availability from the application layer perspective in \\cite{3GPP22104,3GPP22261}.\nThe main difference between the end-to-end communication service performance and the observed performance on the network layer is driven by a system parameter called \\textit{survival time}, $T_{\\mathrm{sv}}$. 
Survival time is the duration for which the application layer can tolerate failures in the communication system without any performance degradation in availability \\cite{ganjPimrcTranslation}.\nWe denote the network layer state by a Bernoulli random variable $X_i^{\\Gamma}{\\left(t\\right)}$, where $\\Gamma{\\in}$\\{\\gls{ul}, \\gls{dl}\\}, and $X_i^{\\Gamma}{\\left(t\\right)}$ for the $i$th \\gls{urllc} device is zero if the last packet is not received at the communication interface within a specified delay bound, because either it could not be decoded at the lower layers or it experienced excessive retransmission and/or queuing delays.\nConsequently, assuming that the application recovery time is negligible, we define the per-device application layer state variable, $Y_i^{\\Gamma}{\\left(t\\right)}$, as \n\\begin{equation}\\label{eq:appState}\nY_i^{\\Gamma}{\\left(t\\right)}{\\coloneqq}\n \\begin{cases}\n 0, & \\mathrm{if} \\int_{t-T_{\\mathrm{sv}}}^t X_i^{\\Gamma}\\!{\\left(\\tau\\right)}d\\tau = 0,\\\\\n {1,} & \\mathrm{otherwise}. \\\\\n \\end{cases}\n\\end{equation}\nTherefore, we can define the long-term communication service availability for the $i$th \\gls{urllc} device in the $\\Gamma$ direction as \\cite{ganjGCOrch}\n\\begin{equation}\\label{eq:availInf}\n \\alpha_i^{\\Gamma}{\\coloneqq} \\lim_{t\\to \\infty}\\Pr\\left\\{Y_i^{\\Gamma}\\!{\\left(t\\right)} {=} 1\\right\\} {=} \\lim_{T\\to\\infty}\\frac{1}{T}\\int_{0}^T Y_i^{\\Gamma}\\!{\\left(t\\right)} dt.\n\\end{equation}\nThe availability in the $\\Gamma$ direction can be estimated over a short time period using\n\\begin{equation}\\label{eq:availEst}\n \\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_k\\right)} {\\coloneqq} \\frac{1}{\\Delta t_k}\\int_{t_k}^{t_k+\\Delta t_k} Y_i^{\\Gamma}\\!{\\left(t\\right)} dt.\n\\end{equation}\nIn \\gls{urllc}, the requirement is often defined in the form of \\cite{popovskiUrllc}\n\\begin{equation}\\label{eq:availReq}\n \\Pr\\left\\{\\alpha_i^{\\Gamma} \\leq \\alpha_i^{\\mathrm{req}}\\right\\}\\leq\\gamma, \\enspace\\forall i\\in \\mathcal{U},\\\n\\end{equation}\nwhere $\\alpha_i^{\\mathrm{req}}$ is the communication service availability requirement of the use case that \\gls{urllc} device $i$ belongs to, and $\\gamma$ is the sensitivity of this use case to $\\alpha_i^{\\mathrm{req}}$. We follow \\cite{3GPP22104} in assuming that the requirement for \\gls{ul} and \\gls{dl} availability is the same for a given use case.\n\n\n\\subsection{Distributed AI Metrics: Training Delay and Accuracy}\\label{sec:dAIKpi}\nThe performance of the distributed \\gls{ai} can be characterized by two factors: training delay (or convergence time) and training accuracy. \n\nThe convergence time of distributed \\gls{ai} algorithms is bounded by the communication and processing latency \\cite{saadConvergOpt}. Let us denote the \\gls{ai} device selection at iteration $k$ by an indicator vector of $\\mathcal{N}_{m,k}$ as $\\bm{\\pi}_k^{\\mathrm{u}}{=}\\left[\\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_1, \\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_2, \\ldots, \\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_N\\right]$, where $\\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_i {\\in} \\{0,1\\}, \\forall i{\\in} \\mathcal{N}$. 
Assuming that the central node requests a subset $\\mathcal{N}_{m,k}$ (i.e., $\\bm{1}^T \\bm{\\pi}_k^{\\mathrm{u}} {=}m_k$) to participate in the training while it waits for $n(\\leq m_k, \\forall k\\in [K])$ local gradients/models, then the \\gls{ai} training delay in the central node for the $k$th iteration, $d_k^{\\mathrm{AI}}$, can be derived as\n\\begin{equation}\\label{eq:aiTrainingDelay}\nd_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)} {\\coloneqq} \\min\\!{\\left\\{\\!\\min_{\\substack{\\mathcal{N}_{n,k}\\subseteq\\mathcal{N}_{m,k},\\\\|\\mathcal{N}_{n,k}|{=}n}}{\\!\\!\\left(\\max_{i\\in\\mathcal{N}_{n,k}}{\\!\\!\\left(d_{i,k}^{\\mathrm{D}}{+}d_{i,k}^{\\mathrm{pr}}{+}d_{i,k}^{\\mathrm{U}}\\right)}\\right)}{+}d_k^{\\mathrm{pr}}, T^{\\max}\\!\\right\\}},\n\\end{equation}\nwhere $\\bm{\\Pi}_k^{\\mathrm{u}}{\\coloneq} \\left[\\bm{\\pi}_1^{\\mathrm{u}}, \\ldots, \\bm{\\pi}_k^{\\mathrm{u}}\\right]$ is the device selection matrix, $d_{i,k}^{\\mathrm{D}}$, $d_{i,k}^{\\mathrm{pr}}$, and $d_{i,k}^{\\mathrm{U}}$, are the latency of \\gls{dl} transmission of the global model, local training (represented in \\eqref{eq:localUp}), and \\gls{ul} transmission of local gradients/models for $k$th iteration of the $i$th device, respectively. It is worth noting that $d_{i,k}^{\\mathrm{D}}$ and $d_{i,k}^{\\mathrm{U}}$ include the transmission processing, payload transmission, occurred retransmissions, and queuing delay (which is determined by the number of devices sharing the same time-frequency resources in the current and previous \\glspl{tti}), and thus, are a function of $\\bm{\\Pi}_k^{\\mathrm{u}}$. Besides, $d_k^{\\mathrm{pr}}$ is the $k$th iteration processing delay required to perform the global model update on the central node, represented in \\eqref{eq:globalUp}. Thus, in \\eqref{eq:aiTrainingDelay}, for each subset of $\\mathcal{N}_{n,k}$ with cardinality of $n$, the maximum aggregated communication and processing delay is calculated among devices. Then, among subsets, $d_k^{\\mathrm{AI}}{(\\cdot)}$ is determined by picking the subset with the lowest delay. However, to avoid an infinite waiting time in the central node, we define $T^{\\mathrm{max}}$ as the maximum permissible delay of every iteration. \\figurename\\,\\ref{fig:dAiWorkflow} demonstrates the training delay in $n$-sync distributed training.\n\\begin{figure}[t]\n\t\\centering\n\t\\includegraphics[width=.99\\columnwidth,keepaspectratio]{./Components/Figs/JSAC/delayFig.pdf}\n\n\t\\caption{The illustration of training delay in distributed AI workflow.}\n\t\\label{fig:dAiWorkflow}\n\t\\vspace{-6mm}\n\\end{figure}\n\nTraining accuracy is another performance metric for a distributed learning task. We can find ``critical points\" as the set of points where the norm of their derivative is 0. The local minima and maxima are a subset of these points. In general, under some regularity assumptions, we can often converge to an approximate critical point (a point wherein the norm of the gradients gets smaller than some positive $\\epsilon$). Note that a critical point may not be an optimal point, e.g., a saddle point. We denote by $K_{\\min}$ the iteration number after which the algorithm converges. As we will show in Section~\\ref{sec:solution}, our design depends on $K_{\\min}$. In the following, we provide a few examples in which we can formulate $K_{\\min}$. 
\nAll examples are formally defined and presented in Appendix~\\ref{app:1}-\\ref{app:3}.\n\n\n\\begin{example}\\label{ex1}\n\\textit{\\gls{dgd}, strongly convex \\cite{Bottou2018SIAM}:} Under smoothness and strong convexity assumptions of the objective functions, as well as a few more technical assumptions \\cite{Bottou2018SIAM}, we can show that the minimum number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy\\footnote{\\label{convexFootnote}In the case of a convex function, a critical point is the global minima.} of the objective functions fulfills the following inequality:\n\\begin{equation}\\label{eq:kConvexSimple}\nK_{\\min} \\geq \\log_{b}{\\left(\\frac{W^{\\mathrm{A}}}{\\epsilon{-}\\frac{z^{\\mathrm{A}}}{n}}\\right)} +1,\n\\end{equation}\nwhere $n$ is the number of participants in the global averaging step, $W^{\\mathrm{A}}$ is a positive constant representing the initial distance to the minimal value of the global loss function, $f{(\\mbox {\\boldmath $w$}^\\star)}$, and $z^{\\mathrm{A}}$ is a positive constant which depends on the learning rate, Lipschitz constant, strong convexity, and the variance of gradient noise when $n{=}1$.\n\\end{example}\n\n\n\\begin{example}\\label{ex2}\n\\textit{\\gls{dgd}, non-convex \\cite{Bottou2018SIAM}:} Under the smoothness assumption of the objective functions, we can show that the minimum number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy to a critical point, fulfills the following inequality:\n\\begin{equation}\\label{eq:kNonConvexSimple}\nK_{\\min}\\geq \\frac{W^\\mathrm{B}}{\\epsilon{-}\\frac{z^\\mathrm{B}}{n}},\n\\end{equation}\nwhere $n$ is the number of participants in the global averaging step, $W^\\mathrm{B}$ is a positive constant which depends on the initial distance to the lower bound of $f$, and $z^\\mathrm{B}$ is a positive constant which is a function of the learning rate, Lipschitz constant, and the variance of gradient noise when $n{=}1$.\n\\end{example}\n\n\\begin{example}\\label{ex3}\n\\textit{\\gls{fl} Algorithm \\cite{flConvergNoniid}:} Consider a \\gls{fl} algorithm with $E$-step local iterations, $N$ devices with \\gls{iid} datasets, global model averaging with $n ({\\leq N})$ randomly selected local models, smooth and strongly convex objective functions. We can show that the minimum required number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy\\footnoteref{convexFootnote} of the objective functions would scale with\n\\begin{equation}\\label{eq:flEpsAccuracy}\nK_{\\min} \\propto \\frac{1}{\\epsilon}\\left[\\left(1+\\frac{1}{n}\\right)EG^2 + \\frac{\\frac{\\sigma^2}{N}+G^2}{E}+G^2\\right],\n\\end{equation}\nwhere $G^2$ and $\\sigma^2$ are the upper bounds on the second moment of the gradient estimates and gradient noises, respectively, in different \\gls{ai} devices.\n\\end{example}\n\nIn \\eqref{eq:kConvexSimple}-\\eqref{eq:flEpsAccuracy}, it is clear that the number of iterations that distributed \\gls{ai} requires to reach $\\epsilon$-accuracy (i.e., $K_{\\min}$) decreases as the required number of devices participating in the global update (i.e., $n$) increases in $n$-sync scheme.\nIn the next section, we show how to use $K_{\\min}$ in our solution approach. \n\\subsection{Problem Formulation} \\label{sec:problemFor}\nHaving defined the system model and the \\glspl{kpi} of interest, the next step is to design a device selection scheme to optimize the distributed \\gls{ai} training process over a wireless network, while still fulfilling the \\gls{urllc} availability requirements. 
Then, from the definition of $d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}$ in \\eqref{eq:aiTrainingDelay}, the availability requirement in \\eqref{eq:availReq}, and assuming the $n$-sync scheme for the distributed \\gls{ai} procedure, the joint optimization problem for distributed \\gls{ai} implementation over a wireless network can be expressed as follows:\n\\begin{subequations}\\label{eq:opt1UserSel}\n\\begin{alignat}{3}\n&\\!\\min_{\\bm{\\Pi}_K^{\\mathrm{u}}} & & \\sum_{k=1}^{K} {d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}}\\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)},\\tag{\\ref{eq:opt1UserSel}}&\\label{eq:UserSelObj}\\\\\n&\\text{s.t.} & & \\Pr\\left\\{\\alpha_{i}^{\\Gamma} \\leq \\alpha_{i}^{\\mathrm{req}}\\right\\}\\leq\\gamma, \\enspace\\forall i \\in \\mathcal{U}, \\forall \\Gamma \\in \\{\\mathrm{UL,DL}\\}, &\\label{eq:UserSelC1}\\\\\n& & & \\bm{1}^\\intercal\\bm{\\pi}_k^\\mathrm{u} \\geq n, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC2}\\\\\n& & & \\bm{\\pi}_k^\\mathrm{u} \\in \\{0,1\\}^{N}, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC3}\\\\\n& & & \\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)} \\in \\{0,1\\}, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC4}\n\\end{alignat}\n\\end{subequations}\nwhere\n$\\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}$ is a binary variable taking $1$ for the iteration numbers at which the iterative algorithm has not reached $\\epsilon$-accuracy (e.g., $\\forall k{\\in} [K_{\\min}{-}1]$ for Examples~\\ref{ex1}-\\ref{ex3}), and $K$ should be selected sufficiently large to ensure that the distributed \\gls{ai} algorithm reaches $\\epsilon$-accuracy within $K$ iterations. To fulfill the required number of local updates in the global update of \\eqref{eq:dAI} in the $n$-sync scheme, \\eqref{eq:UserSelC2} requires the central node to select at least $n$ \\gls{ai} devices, while leaving it the flexibility to select any number of extra devices to tackle the straggling problem (i.e., $m_k = \\bm{1}^\\intercal\\bm{\\pi}_k^\\mathrm{u}$, and $m_k\\in[n, n+1, \\ldots, N], \\forall k\\in[K]$).\nMoreover, \\eqref{eq:UserSelC3} indicates that the device selection policy is a binary vector. Note that \\eqref{eq:UserSelC2}, \\eqref{eq:UserSelC3}, and \\eqref{eq:UserSelC4} must be respected at all decision time epochs. \n\nOn the one hand, the communication service availability of each \\gls{urllc} device is a function of the network layer state variable, $X_{i}^{\\Gamma}{(t)}$, and the end-to-end delay of its packets. These two depend on many variables, such as the instantaneous \\gls{sinr}, path gain, code rate, and transmission buffer status. On the other hand, these variables also impact the delay of the selected \\gls{ai} devices, influencing the training delay for each iteration. In addition, since the \\gls{urllc} service has higher priority than the distributed \\gls{ai} service, the amount of \\gls{urllc} traffic being served by the corresponding \\gls{gnb} severely affects the distributed \\gls{ai} training delay. However, joint modeling of these \\gls{urllc} and distributed \\gls{ai} \\glspl{kpi} is highly complex and mandates significant assumptions on the queue models, channel, and traffic.\n\n\\section{Transformation to \\gls{mdp} Problem}\\label{sec:solution}\nThe optimization problem \\eqref{eq:opt1UserSel} is non-convex and, owing to the binary selection variables, combinatorial. 
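To give a sense of why a direct search over \\eqref{eq:opt1UserSel} is impractical, note that even a single iteration admits $\\sum_{m=n}^{N}\\binom{N}{m}$ feasible selections under \\eqref{eq:UserSelC2}--\\eqref{eq:UserSelC3}. The short back-of-the-envelope sketch below (not part of the proposed method) evaluates this count for $N{=}50$ and $n{=}15$, the values used later in our simulations.
\\begin{verbatim}
# Back-of-the-envelope count of feasible per-iteration device selections:
# binary vectors of length N with at least n ones.
from math import comb

N, n = 50, 15
count = sum(comb(N, m) for m in range(n, N + 1))
print(f"{count:.3e}")   # about 1.1e+15 feasible selections per iteration
\\end{verbatim}
Enumerating such a set at every iteration, for every possible network state, is clearly out of reach, which further motivates the learning-based approach adopted below.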
In addition, characterizing the impact of $\\bm{\\pi}_k^\\mathrm{u}$ on our \\glspl{kpi} necessitates explicit modeling of the channel and queues, which involves approximations that may not be accurate in practice. Therefore, we propose to model the device selection problem \\eqref{eq:opt1UserSel} as a finite-horizon \\gls{mdp}. Consequently, in Section~\\ref{sec:mdp}, we specify the state space, $\\mathcal{S}$, the action space, $\\mathcal{A}$, and the set of all possible rewards, $\\mathcal{R}$, obtained through dynamic interactions between the central node and the \\gls{ran} environment. Nevertheless, it is not possible to derive the transition probability function ($p: \\mathcal{S}\\times\\mathcal{R}\\times\\mathcal{S}\\times\\mathcal{A} \\mapsto [0,1]$) in our complex and dynamic environment. Hence, in Section~\\ref{sec:sac}, we solve our device selection problem with a model-free \\gls{drl} algorithm, namely \\gls{sac}, to address the finite-horizon, fully observable, discounted \\gls{mdp} problem.\n\nOn the path to transforming optimization problem \\eqref{eq:opt1UserSel} into an \\gls{mdp}, we note that our two services operate on different time scales. In other words, \\gls{urllc} performance should be measured based on the actual time, whilst each time step of the control loop is as long as one iteration of the distributed AI algorithm takes (i.e., $d_k^{\\mathrm{AI}}{\\left(\\cdot\\right)}$ for the $k$th iteration). Hence, the control loop is not periodic in actual time, and is triggered by the central node. Accordingly, we use $\\Delta t_k$ (i.e., $t_{k+1}{-}t_{k}$) wherever necessary to emphasize the time instants at which iteration $k$ begins and ends.\n\n\\subsection{\\gls{mdp}}\\label{sec:mdp}\nThe essential elements of the \\gls{mdp} are determined as follows.\n\\subsubsection{State Space, $\\mathcal{S}$} \\label{sec:state}\nThe state space characterizes the environment. We categorize the environment's state at iteration $k$ (i.e., $s_k\\in\\mathcal{S}$) into three classes: i) the observations from each \\gls{urllc} device, ii) the observations from each \\gls{ai} device, and iii) the observations from each \\gls{gnb}. In the following, we describe these three classes.\n\n\\noindent\\textbf{\\gls{urllc} \\gls{qos} variables:}\nThe communication service availability of each \\gls{urllc} device, as the main \\gls{urllc} \\gls{kpi}, is a function of the \\gls{per}, the mean downtime, and the survival time \\cite{ganjPimrcTranslation,ganjGCOrch}. Except for the survival time, which is static and use-case specific, the state should include both the (\\gls{ul}/\\gls{dl}) \\gls{per} and the (\\gls{ul}/\\gls{dl}) mean downtime, estimated via empirical measurements within $\\Delta t_{k-1}$. In addition to these measures that explicitly affect the communication service availability, cell association, (\\gls{ul}/\\gls{dl}) buffer size (at $t_k$), (\\gls{ul}/\\gls{dl}) \\gls{sinr}, and (\\gls{ul}/\\gls{dl}) delay are other metrics that implicitly impact the availability. However, the \\gls{sinr} and delay statistics might vary significantly during the possibly long $\\Delta t_{k-1}$. Hence, we represent their distributions using specific statistics of these measures, i.e., the $1$st percentile, $5$th percentile, and median of the \\gls{sinr} distribution, and the $95$th percentile, $99$th percentile, and median of the delay distribution. In fact, utilizing such percentiles is well motivated by the extreme availability performance of \\gls{urllc}, which, under proper system design, is affected by the tails of the delay and \\gls{sinr} distributions \\cite{URLLCTRS}. 
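To illustrate how such per-device observations could be assembled, the sketch below forms the \\gls{urllc}-related part of the state from raw measurements collected within $\\Delta t_{k-1}$; the function and argument names are hypothetical, and the exact feature layout in our implementation may differ.
\\begin{verbatim}
# Hypothetical assembly of the per-URLLC-device part of the state s_k
# from measurements collected within the previous iteration window.
import numpy as np

def urllc_state_features(per_ul, per_dl, downtime_ul, downtime_dl,
                         cell_id, buf_ul, buf_dl,
                         sinr_db_samples, delay_ms_samples):
    """Per-device URLLC features: PER, downtime, cell, buffers, percentiles."""
    sinr_feats = np.percentile(sinr_db_samples, [1, 5, 50])    # tail + median
    delay_feats = np.percentile(delay_ms_samples, [50, 95, 99])
    return np.concatenate(([per_ul, per_dl, downtime_ul, downtime_dl],
                           [cell_id, buf_ul, buf_dl],
                           sinr_feats, delay_feats))

# Example with synthetic measurements for one device:
rng = np.random.default_rng(1)
feats = urllc_state_features(1e-3, 5e-4, 0.0, 0.2, 2, 120, 80,
                             rng.normal(15, 6, 500),    # SINR samples [dB]
                             rng.gamma(2.0, 1.5, 500))  # delay samples [ms]
print(feats.shape)   # (13,)
\\end{verbatim}
The distributed \\gls{ai} and \\gls{gnb}-level observations described next are collected in an analogous manner.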
\n \n\\noindent\\textbf{Distributed \\gls{ai} delay variables:}\nThe training delay of each iteration, $d_k^{\\mathrm{AI}}{(\\bm{\\Pi}_k^{\\mathrm{u}})}$, is a function of the \\gls{ai} device selection; thus, we include a binary variable in the state indicating whether each \\gls{ai} device was selected in the last iteration. As indicated by \\eqref{eq:aiTrainingDelay}, the delay of the \\gls{dl} transmission of the global model, $d_{i,k}^{\\mathrm{D}}$, and the delay of the \\gls{ul} transmission of local gradients/models, $d_{i,k}^{\\mathrm{U}}$, directly impact the training delay, and thus, should be included in the state. Besides, the (\\gls{ul}/\\gls{dl}) buffer size (at $t_k$) and the (\\gls{ul}/\\gls{dl}) \\gls{sinr} of the underlying transmissions have an implicit effect on the delay, and therefore, we include them in the state. Focusing on the overall statistics (unlike for the \\gls{urllc} service), we represent the \\gls{sinr} distribution of the underlying transmissions for each \\gls{ai} device with its $5$th percentile, median, and $95$th percentile.\nNote that no empirical measurements exist for $d_{i,k}^{\\mathrm{D}}$, $d_{i,k}^{\\mathrm{U}}$, and the \\gls{sinr} of the \\gls{ai} devices whose local models the central node does not receive in the $k$th iteration (i.e., $i\\notin \\mathcal{N}_{n,k}$). Moreover, if an \\gls{ai} device was selected at $t_{k-1}$, but its local update is not among the first $n$ received local updates, its buffer at $t_k$ would not be empty.\n\n\\noindent\\textbf{\\gls{gnb} level observations:}\nAt the \\gls{gnb} level, the number of resource blocks each service has consumed (in both \\gls{ul} and \\gls{dl} directions) significantly impacts both the training delay and the availability. Therefore, we propose to include in the state the mean number of allocated resource blocks (per slot), within $\\Delta t_{k-1}$, for each service.\n\n\\subsubsection{Action Space, $\\mathcal{A}$} \\label{sec:actionMDP}\nThe action space, $\\mathcal{A}$, is the set of all possible decision variables by which the \\gls{drl} agent interacts with the environment. Considering \\eqref{eq:opt1UserSel}, our action vector at the $k$th iteration should be the device selection vector $\\bm{\\pi}_k^{\\mathrm{u}}$. However, to mask out selections that do not satisfy condition \\eqref{eq:UserSelC2}, we define $\\bm{a}_k$ as a continuous vector where each element represents the action for an \\gls{ai} device (i.e., $\\bm{a}_k\\in \\left[-1,1\\right]^N$). Then, the mapping from $\\bm{a}_k$ to $\\bm{\\pi}_k^{\\mathrm{u}}$ is determined by\n\n\\begin{equation} \\label{eq:actionToselection}\n \\bm{\\pi}_k^{\\mathrm{u}}\\coloneqq\\begin{cases}\n \\mathds{1}\\{\\bm{a}_k\\succeq \\bm{0}\\}, & \\text{if $\\bm{1}^\\intercal\\mathds{1}\\{\\bm{a}_k\\succeq \\bm{0}\\}\\geq n$},\\\\\n \\mathds{1}\\{\\bm{a}_k\\succeq a_{k}^{(n)}\\bm{1}\\}, & \\text{otherwise},\n \\end{cases}\n\\end{equation}\nwhere $a_{k}^{(n)}$ is the $n$th largest element in vector $\\bm{a}_k$.\n\n\\subsubsection{The Reward Function, $r$} \\label{sec:reward}\nIn general, the \\gls{drl} agent follows an explicit goal, i.e., to maximize its cumulative discounted rewards. In other words, the reward function, $r_{k+1}$, is the payoff for taking action $\\bm{a}_k$ at state $\\bm{s}_k$. \nAs \\eqref{eq:availInf} indicates, the communication service availability of \\gls{urllc} devices is measured over an infinite time horizon, while the temporal granularity of the \\gls{drl} is determined by the amount of time each distributed \\gls{ai} training round takes, $\\Delta t_k$. 
Therefore, we suggest using the availability estimator in \\eqref{eq:availEst} as part of the reward function. \nAlthough estimating such a long-term measure over a short period may be imprecise, it does reflect the consequence of the device selection policy as the application layer observes it in the near future. Given the optimization objective \\eqref{eq:opt1UserSel} and condition \\eqref{eq:UserSelC1}, we define the reward for iteration $k+1$, $r_{k+1}$, as\n\\begin{multline}\n r_{k+1} \\coloneqq \\upsilon\\exp{\\left(\\zeta\\min\\left\\{\\min_{\\substack{i\\in\\mathcal{U},\\\\\\Gamma \\!{\\in}\\! \\{\\mathrm{UL},\\mathrm{DL}\\}}}\\!\\!\\!\\!\\!{\\left(\\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_{k}\\right)}-\\alpha_i^\\mathrm{req}\\right)},0\\right\\}\\right)} \\\\ \n + \\left(1-\\upsilon\\right) \\frac{T^{\\mathrm{max}} - d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}}{T^{\\mathrm{max}}},\\label{eq:reward}\n\\end{multline}\nwhere $\\upsilon {(\\in [0,1])}$ is the weight characterizing the relative importance of the \\gls{urllc} reward and the distributed \\gls{ai} reward. In the \\gls{urllc} reward, $\\left(\\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_k\\right)}-\\alpha_i^\\mathrm{req}\\right)$ is negative for those \\gls{urllc} devices that did not meet their corresponding availability requirement within $\\Delta t_k$, regardless of the transmission direction, $\\Gamma$. Hence, our reward function design drives the \\gls{drl} agent to maximize the availability of the worst \\gls{urllc} device among those that do not meet their availability requirements. In addition, $\\zeta$ is a design parameter which is a function of the sensitivity ($\\gamma$) and the precision required by the maximum availability requirement (i.e., $\\zeta {\\propto} \\max_i\\!{\\left(\\alpha_i^{\\mathrm{req}}\\right)}{/}\\gamma$).\nNote that the device selection policy receives the full \\gls{urllc} reward (i.e., $\\upsilon$) when all of the \\gls{urllc} devices fulfill their availability requirements. For the distributed \\gls{ai} reward, the shorter $d_k^{\\mathrm{AI}}\\left(\\cdot\\right)$ is, the larger the $\\bm{\\pi}_k^{\\mathrm{u}}$'s reward becomes. Moreover, to minimize the tail of the per-device availability distribution and the average training delay, the \\gls{urllc} reward decreases exponentially while the reduction in the distributed \\gls{ai} reward is linear.\n\n\\subsection{Solution With Soft Actor-Critic Based Algorithm}\\label{sec:sac}\nIn this paper, we employ \\gls{sac} to solve the device selection \\gls{mdp} problem in the coexistence scenario formulated above. The following characteristics of \\gls{sac} benefit our scenario~\\cite{sac}:\ni) \\gls{sac} is an off-policy model-free \\gls{drl} algorithm in which explorations seek to find an optimal policy maximizing not only the expected accumulated discounted reward but also the expected entropy at each visited state, ii) \\gls{sac} has an actor-critic architecture where the critic \\gls{dnn} estimates state-action pair values, while the actor \\gls{dnn} computes the policy, and iii) \\gls{sac} mitigates the high sample complexity and reduces the sensitivity of \\gls{drl} to hyperparameters. In the rest of this section, we describe the \\gls{sac} algorithm of \\cite{sac} as applied to our device selection problem, following our actual implementation.\n\nThe main objective in \\gls{sac} is to find an optimal stochastic policy that maximizes the discounted sum of reward and entropy. 
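Before specializing \\gls{sac} to our problem, the agent--environment interface, i.e., the action-to-selection mapping in \\eqref{eq:actionToselection} and the reward in \\eqref{eq:reward}, can be summarized by the following simplified sketch; the names are ours, the availability estimates $\\hat{\\alpha}_i^{\\Gamma}$ and the delay $d_k^{\\mathrm{AI}}$ are assumed to be reported by the environment, and ties in the action vector are ignored.
\\begin{verbatim}
# Sketch of the agent-environment interface: continuous action -> binary
# device selection with at least n ones, and the mixed URLLC/AI reward.
import numpy as np

def action_to_selection(a, n):
    """Select devices with nonnegative actions; fall back to the n largest."""
    pi = (a >= 0).astype(int)
    if pi.sum() < n:
        pi = np.zeros_like(pi)
        pi[np.argsort(a)[-n:]] = 1      # indices of the n largest entries
    return pi

def reward(alpha_hat, alpha_req, d_ai, T_max, upsilon=0.5, zeta=100.0):
    """Exponential URLLC availability term plus linear AI-delay term."""
    worst_gap = min(np.min(alpha_hat - alpha_req), 0.0)  # over devices, UL/DL
    return (upsilon * np.exp(zeta * worst_gap)
            + (1.0 - upsilon) * (T_max - d_ai) / T_max)

a = np.random.default_rng(2).uniform(-1, 1, size=50)    # one action vector
pi = action_to_selection(a, n=15)
alpha_hat = np.array([[0.995, 0.999], [0.988, 0.992]])  # per device, UL/DL
print(pi.sum(), reward(alpha_hat, alpha_req=0.99, d_ai=1.2, T_max=10.0))
\\end{verbatim}
The default values $\\upsilon{=}0.5$ and $\\zeta{=}100$ above match the settings used in our evaluations (Section~\\ref{sec:simulation}).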
For our device selection problem, the action space $\\mathcal{A}$ has $N$ dimensions (refer to Section~\\ref{sec:actionMDP}). Let us denote the action space of the $i$th dimension, which corresponds to the $i$th \\gls{ai} device, by $\\mathcal{A}_i$. Hence, the optimal stochastic policy $\\bm{\\pi}^\\star{(\\cdot|\\bm{s})}$, $\\forall \\bm{s}\\in\\mathcal{S}$, maps the state $\\bm{s}$ to a vector of probability distributions, each over one dimension of the action space (i.e., $\\mathcal{A}_i$). Thus, the objective of \\gls{sac} is\n\\begin{align}\\label{eq:sacEntropy}\n\\bm{\\pi}^\\star{\\left(\\cdot|\\cdot\\right)} \\coloneqq \\argmax_{\\bm{\\pi}\\left(\\cdot|\\cdot\\right)} \\mathbb{E}{\\left[\\sum_{k=1}^{K} \\lambda^{k-1} {\\left[r_{k+1}+\\psi \\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_k\\right)\\right)}\\right]}\\right]}, \n\\end{align}\nwhere $K$, $\\lambda {(\\in[0,1])}$, and $\\psi{(>0)}$ are the episode length, the discount factor, and the temperature parameter specifying the relative importance of the reward and entropy terms, respectively. Furthermore, $\\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_k\\right)\\right)} \\coloneqq \\mathbb{E}{\\left[-\\ln\\!{\\left(\\bm{\\pi}{(\\bm{a}|\\bm{s}_k)}\\right)}\\right]}$ is the entropy of policy $\\bm{\\pi}$ at state $\\bm{s}_k$. Introducing entropy in \\eqref{eq:sacEntropy} guides the policy to explore more broadly while avoiding clearly unfavorable trajectories.\n\nWe employ prioritized experience replay, originally developed for \\gls{dqn} \\cite{prioReplay}. In this approach, transitions with larger temporal-difference errors are replayed more frequently, which adds a bias toward those actions. To address this issue, we remove this prioritization during training.\nMoreover, the proposed \\gls{sac}-based algorithm framework makes use of target networks and clipped double Q-learning, both of which were established in \\gls{td3}. According to \\cite{td3}, such additions can mitigate overestimation in value approximation while assuring convergence to a suboptimal policy.\n\nThe \\gls{sac} algorithm (and actor-critic methods in general) uses policy iteration, in which the algorithm alternates between policy evaluation (computing the state-action value function $Q_{\\bm{\\pi}}{(\\bm{s},\\bm{a})}$) and policy improvement (updating $\\bm{\\pi}$) in the direction of maximizing the discounted return (i.e., the sum of the reward and a portion of the entropy here). \nIn the policy evaluation step, using the soft Bellman backup equations \\cite{sac}, the soft state-action value function can be computed iteratively as follows:\n\\begin{align}\nQ_{\\bm{\\pi}}{(\\bm{s}_{k},\\bm{a}_{k})} = r_{k+1} {+} \\lambda\\mathbb{E}{\\left[Q{\\left(\\bm{s}_{k+1},\\bm{a}_{k+1}\\right)}{+}\\psi \\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_{k+1}\\right)\\right)}\\right]}. \\label{eq:sacQ}\n\\end{align}\n\nIn large-scale reinforcement learning problems where the state and action spaces are large, $Q_{\\bm{\\pi}}$ and $\\bm{\\pi}$ are approximated in each iteration using \\glspl{dnn} (via critics and actors, respectively). As mentioned before, we leverage target networks and clipped double Q-learning. Hence, in our architecture, there are $4$ \\glspl{dnn} for the first critic, its target, the second critic, and its target, whose weights are denoted by $\\bm{\\varphi}_1$, $\\bm{\\tilde{\\varphi}}_1$, $\\bm{\\varphi}_2$, and $\\bm{\\tilde{\\varphi}}_2$, respectively. 
In addition, there are $2$ \\glspl{dnn} for the actor and its target network, whose weights are denoted by $\\bm{\\vartheta}$ and $\\bm{\\tilde{\\vartheta}}$, respectively. Thus, $Q_{\\bm{\\pi}}$, in \\eqref{eq:sacQ}, is approximated by $2$ \\glspl{dnn} as $Q_{\\bm{\\varphi}_i}$, $i\\in\\{1,2\\}$.\n\nSince it is preferable to have the offline training option, let us assume the transitions are stored in a replay buffer, $\\mathcal{B}$. Then, regardless of sampling technique (e.g., uniform or prioritized experience replay) and the mini-batch size ($|\\mathcal{B}_{\\mathrm{mb}}|$), we can represent a sampled transition with ${(\\bm{s},\\bm{a},r,\\hat{\\bm{s}}, I)}$, where $I$ is a binary parameter that is $0$ if the distributed \\gls{ai} converges in $\\hat{\\bm{s}}$, and is $1$, otherwise.\nThen, $\\bm{\\varphi}_1$ and $\\bm{\\varphi}_2$ can be trained by minimizing the \\gls{mse} for each sampled transition as\n\\begin{align}\nJ_\\mathrm{Q}(\\bm{\\varphi}_i)\\coloneq\\mathbb{E}{\\left[\\frac{1}{2}\\!\\left(Q_{\\bm{\\varphi}_i}\\!{(\\bm{s},\\bm{a})}{-}\\widetilde{Q}{(\\hat{\\bm{s}},\\tilde{\\bm{a}},r,I)}\\right)^2\\right]}, \\label{eq:lossQJ}\n\\end{align}\nwhere $\\tilde{\\bm{a}}$ is sampled from $\\bm{\\pi}_{\\tilde{\\vartheta}}{(\\cdot|\\bm{s}_{k+1})}$, and \n\\begin{align}\n\\widetilde{Q}{(\\bm{s},\\bm{a},r,I)} \\coloneq r + I \\lambda {\\left(\\min_{i=1,2} Q_{\\bm{\\tilde{\\varphi}}_i}{\\left(\\bm{s},\\bm{a}\\right)}-\\psi \\ln\\!{\\left(\\bm{\\pi}{(\\bm{a}|\\bm{s})}\\right)}\\right)}. \\label{eq:sacTargetQ}\n\\end{align}\nNote that the minimum represents the smallest Q-value between the two state-action value function approximations for clipped double Q-learning \\cite{td3}.\nThen, in order to minimize $J_Q{(\\cdot)}$, $\\bm{\\varphi}_1$ and $\\bm{\\varphi}_2$ are updated in the direction of gradient descent.\nTo ensure that temporal-difference error remains low, we update target critics' weights gradually by $\\bm{\\tilde{\\varphi}}_i {=} \\nu\\bm{\\varphi}_i{+}(1{-}\\nu)\\bm{\\tilde{\\varphi}}_i$ for $i{\\in}\\{1,2\\}$ at each \\gls{drl} iteration.\n\nIn the policy improvement step, the actor \\gls{dnn} can be updated by minimizing the expected Kullback-Leibler divergence between $\\bm{\\pi}_{\\vartheta}$ and the exponential of the soft state-action value function, which can be rewritten as\n\\begin{align}\\label{eq:lossPolicy1}\nJ_\\mathrm{\\pi}{(\\bm{\\vartheta})}\\coloneq\\mathbb{E} \\left[\\psi\\ln\\!{\\left(\\pi_{\\bm{\\vartheta}}{\\left(\\!\\bm{a}|\\bm{s}\\right)}\\right)}-\\min_{i=1,2} Q_{\\bm{\\varphi}_i}\\!{\\left(\\!\\bm{s},\\bm{a}\\right)}\\right].\n\\end{align}\nTo minimize $J_\\mathrm{\\pi}{(\\cdot)}$ based on the latest policy, we employ the re-parameterization technique, from \\cite{sac}, to reformulate the expectation over actions into an expectation over noise, leading to a smaller variance estimator.\nTherefore, we draw samples from a squashed Gaussian policy such that $\\widehat{\\bm{a}}{\\coloneq}\\tanh{\\left(\\mu_{\\mathrm{G},\\bm{\\vartheta}}{(\\bm{s})}+\\sigma_{\\mathrm{G}, \\bm{\\vartheta}}{(\\bm{s})}\\cdot\\bm{\\chi}\\right)}$, where $\\mu_{\\mathrm{G},\\bm{\\vartheta}}{(\\cdot)}$ and $\\sigma_{\\mathrm{G}, \\bm{\\vartheta}}{(\\cdot)}$ are the estimated mean and standard deviation of a Gaussian distribution, respectively, and $\\bm{\\chi}$ follows a multivariate Gaussian distribution, whose mean is a vector of $0$s, and covariance matrix is the identity matrix. 
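As a compact illustration of the mechanics just described, the sketch below computes the clipped double-Q target in \\eqref{eq:sacTargetQ} and a reparameterized squashed-Gaussian action $\\widehat{\\bm{a}}$ with its log-probability; the critics and the policy parameters are represented by placeholder callables and arrays, so this is a simplified illustration rather than our exact implementation.
\\begin{verbatim}
# Sketch of the clipped double-Q target and the reparameterized
# squashed-Gaussian sampling. Critics are placeholder callables.
import numpy as np
rng = np.random.default_rng(3)

def q_target(r, s_next, a_next, logp_next, I, Q1_t, Q2_t, lam, psi):
    """r + I*lam*( min(Q1,Q2)(s',a') - psi*log pi(a'|s') )."""
    q_min = min(Q1_t(s_next, a_next), Q2_t(s_next, a_next))
    return r + I * lam * (q_min - psi * logp_next)

def sample_squashed_action(mu, log_std):
    """a_hat = tanh(mu + std*chi), chi ~ N(0, I), with its log-probability."""
    chi = rng.standard_normal(mu.shape)
    a_hat = np.tanh(mu + np.exp(log_std) * chi)
    logp = np.sum(-0.5 * (chi**2 + 2.0 * log_std + np.log(2.0 * np.pi))
                  - np.log(1.0 - a_hat**2 + 1e-6))   # tanh correction
    return a_hat, logp

# Toy usage with dummy target critics that ignore the state:
Q1_t = lambda s, a: -np.sum((a - 0.1)**2)
Q2_t = lambda s, a: -np.sum((a + 0.1)**2)
a_hat, logp = sample_squashed_action(np.zeros(50), np.full(50, -1.0))
print(q_target(0.8, None, a_hat, logp, I=1,
               Q1_t=Q1_t, Q2_t=Q2_t, lam=0.1, psi=0.2))
\\end{verbatim}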
Hence, we can reformulate $J_{\\mathrm{\\pi}}{(\\bm{\\vartheta})}$ by replacing $\\bm{a}$ with $\\widehat{\\bm{a}}$ in \\eqref{eq:lossPolicy1}.\nThe policy parameters, $\\bm{\\vartheta}$, are then updated in the gradient descent direction as in \\cite{sac}. Additionally, we update the target actor weights smoothly by $\\bm{\\tilde{\\vartheta}} = \\nu\\bm{\\vartheta}+(1-\\nu)\\bm{\\tilde{\\vartheta}}$.\n\n\\begin{algorithm}[t]\n\t\\SetKwFor{ForPar}{for}{do in parallel}{end}\n\t\\SetAlgoLined\n\t\\textbf{Input}: Set of \\gls{ai} devices $\\mathcal{N}$, Required number of local models in global update $n$\\;\n\t\\textbf{Output}: Device selection policy as a function of \\gls{ran} state\\;\n\t\\textbf{Initialize}: $\\bm{\\varphi}_1$, $\\bm{\\varphi}_2$, and $\\bm{\\vartheta}$ and set $\\bm{\\tilde{\\varphi}}_1\\gets\\bm{\\varphi}_1$, $\\bm{\\tilde{\\varphi}}_2\\gets\\bm{\\varphi}_2$, and $\\bm{\\tilde{\\vartheta}}\\gets\\bm{\\vartheta}$, $k\\gets1$\\;\n\t\\Comment*[h]{episode: numbers of iterations during which distributed \\gls{ai} converges}\\\\\n\t\\ForEach{episode}{\n\t Set initial device selection, from previous virtual training or random\\;\n\t \\While{true}{\n \t\tReceive $\\bm{c}_{i,k}$ from $n$ \\gls{ai} devices, or $T^{\\max}$\\;\n \t\tObserve $\\bm{s}_k$ (measured within $\\Delta t_{k-1}$)\\;\n \t\t\\lIf{$d_k^{\\mathrm{AI}} < T^{\\max}$}{Compute $\\mbox {\\boldmath $w$}_{k+1}$ as in \\eqref{eq:globalUp}}\n \t\t\\lElse{$\\mbox {\\boldmath $w$}_{k+1}\\gets \\mbox {\\boldmath $w$}_{k}$}\n \t\tSample an action, $\\bm{a}_k {\\sim} \\bm{\\pi}_{\\vartheta}{(\\cdot|\\bm{s}_{k})}$\\;\n \t\tTransmit $\\mbox {\\boldmath $w$}_{k+1}$, via \\gls{ran}, to selected \\gls{ai} devices ($\\bm{\\pi}_{k}^{\\mathrm{u}}$, derived from \\eqref{eq:actionToselection})\\;\n \t\tObserve $\\bm{s}_{k+1}$, and calculate $r_{k+1}$ via \\eqref{eq:reward}\\;\n \t\t\\lIf{distributed \\gls{ai} converges}{$I_{k+1}\\gets0$}\\lElse{$I_{k+1}\\gets1$}\n \t\tStore ${\\left(\\bm{s}_k, \\bm{a}_k, r_{k+1}, \\bm{s}_{k+1}, I_{k+1}\\right)}$ in $\\mathcal{B}$\\;\n \t\t\\lIf{$I_{k+1}=0$}{break}\n \t\t$k\\gets k+1$\\;\n\t }\n\t \\Comment*[h]{Training \\glspl{dnn}}\\\\\n\t \\If{$|\\mathcal{B}|{\\geq}|\\mathcal{B}|_{\\min}$ \\textbf{and} $k = l|\\mathcal{B}_\\mathrm{mb}|, \\forall l{\\in}\\{1,2,\\ldots\\}$}{\n Randomly sample a mini-batch $\\mathcal{B}_{\\mathrm{mb}}$ from $\\mathcal{B}$\\;\n \\ForAll{${\\left(\\bm{s}, \\bm{a}, r, \\hat{\\bm{s}}, I\\right)}\\in\\mathcal{B}_{\\mathrm{mb}}$}{\n Derive $\\widehat{Q}$ using \\eqref{eq:sacTargetQ}, where $\\tilde{\\bm{a}}{\\sim}\\bm{\\pi}_{\\tilde{\\vartheta}}{(\\cdot|\\hat{\\bm{s}})}$\\;\n }\n $\\bm{\\varphi}_i \\gets \\bm{\\varphi}_i {-} \\frac{1}{|\\mathcal{B}_\\mathrm{mb}|}\\sum_{\\forall b\\in\\mathcal{B}_{\\mathrm{mb}}}\\!\\!\\widehat{\\nabla}J_\\mathrm{Q}(\\bm{\\varphi}_i)$, for $i{\\in}\\{1,2\\}$\\;\n $\\bm{\\vartheta}{\\gets}\\bm{\\vartheta} {-} \\frac{1}{|\\mathcal{B}_\\mathrm{mb}|} \\!\\sum_{\\forall b\\in\\mathcal{B}_{\\mathrm{mb}}}\\!\\!\\!\\widehat{\\nabla}\\!J_{\\!\\mathrm{\\pi}}(\\bm{\\vartheta})$, where \\eqref{eq:lossPolicy1} uses $\\widehat{\\bm{a}}$\\;\n $\\bm{\\tilde{\\varphi}}_i \\gets \\nu\\bm{\\varphi}_i{+}(1{-}\\nu)\\bm{\\tilde{\\varphi}}_i$ for $i{\\in}\\{1,2\\}$\\;\n $\\bm{\\tilde{\\vartheta}} \\gets \\nu\\bm{\\vartheta}+(1-\\nu)\\bm{\\tilde{\\vartheta}}$\\;\n }\n\t}\n\\caption{SAC-Based Algorithm for Device Selection in the Coexistence of URLLC and Distributed AI}\n\\label{alg:alg1}\n\\end{algorithm}\n\n\\algorithmcfname\\,\\ref{alg:alg1} summarizes the 
learning procedure of our \\gls{sac}-based device selection solution in the coexistence scenario. Leveraging the off-policy learning capability of such an algorithm, one can train the \\glspl{dnn} via either the virtual network (e.g., digital twin or realistic simulations) or an operating network (e.g., in safe exploration mode). On the former, $\\bm{a}_k$ can be sampled using the behavior policy in each episode, and episodes can run in parallel to speed up the training. Nevertheless, on the operating network, this algorithm can switch to on-policy learning (i.e., $\\bm{a}_k$ is sampled via the most updated policy, and episodes run consecutively). A hybrid strategy in which the \\glspl{dnn} are trained first with a virtual network and then tuned via an operational network could potentially result in a more efficient learning procedure.\n\nIn the following section, we describe our simulator's modeling principles and its configuration.\n\\section{Simulation Methodology and Configuration}\\label{sec:simulation}\nFor simulating the deployment where \\gls{urllc} and distributed \\gls{ai} services coexist, we considered a factory automation scenario, as demonstrated in \\figurename\\,\\ref{fig:simulationSetup}. More explicitly, we designed a 3D model of a small factory of size $40\\times40\\times10$\\,m$^3$ with $4$ \\glspl{gnb} at the height of $8$\\,m, and with an inter-site distance of $20$\\,m.\n\\subsection{Channel Model}\nIn our simulations, we considered the channel model for the \\gls{inf-dh} use case, where the \\gls{gnb} and the devices are placed, respectively, higher and under the average height of the clutters \\cite{3GPP38901}. The clutters in \\gls{inf-dh} use case typically represent small to medium-sized metallic machines and irregularly shaped objects. In the propagation model, the path loss is calculated by tracing the degradation in signal strength over distance under \\gls{los} and \\gls{nlos} circumstances. The path loss under \\gls{los} and \\gls{nlos} assumptions are given by \\cite{3GPP38901}\n\\begin{subequations}\\label{eq:pathLoss}\n\\begin{align}\n &\\hspace{-1.31mm}PL_{\\mathrm{LOS}}{(d_{\\mathrm{3D}})}\\mathrm{[dB]}{=}31.84{+}21.5\\log_{10}\\!{\\left(d_{\\mathrm{3D}}\\right)}{+}19\\log_{10}\\!{\\left(f_{\\mathrm{c}}\\right)}, \\label{eq:PL_LOS}\\\\\n &\\hspace{-1.31mm}PL_{\\mathrm{NLOS}}{(d_{\\mathrm{3D}})} \\mathrm{[dB]} {=} \\max{\\left(PL_{\\mathrm{LOS}}{(d_{\\mathrm{3D}})}, PL_{\\mathrm{DH}}{(d_{\\mathrm{3D}})} \\right)},\\label{eq:PL_NLOS}\n\\end{align}\n\\end{subequations}\nwhere\n\\begin{equation}\\label{eq:PL_InFDH}\nPL_{\\mathrm{DH}}{(d_{\\mathrm{3D}})} \\mathrm{[dB]}{=}33.63+21.9\\log_{10}{\\left(d_{\\mathrm{3D}}\\right)}+20\\log_{10}{\\left(f_{\\mathrm{c}}\\right)}.\n\\end{equation}\nIn above equations, $d_{\\mathrm{3D}}$ and $f_{\\mathrm{c}}$ denote the $3$D distance between the device and \\gls{gnb}, and the center frequency, respectively. \nIn \\gls{inf-dh}, the \\gls{los} probability is described by \\cite{3GPP38901}\n\\begin{equation}\\label{eq: Pr_LOS}\n\\mathrm{Pr}_{\\mathrm{LOS}}{(d_{\\mathrm{2D}})} {=} \\exp\\left(\\frac{d_{\\mathrm{2D}}\\ln\\!{\\left(1-r_\\mathrm{clut}\\right)}\\left(h_\\mathrm{clut}-h_\\mathrm{device}\\right)}{d_{\\mathrm{clut}}\\left(h_\\mathrm{gNB}-h_\\mathrm{device}\\right)}\\right),\n\\end{equation}\nwhere $d_{\\mathrm{2D}}$ represents the ground distance between \\gls{gnb} and the device. 
Besides, $h_\\mathrm{gNB}$, $h_\\mathrm{device}$, $d_{\\mathrm{clut}}$, $h_{\\mathrm{clut}}$, and $r_{\\mathrm{clut}}$ denote the \\gls{gnb}'s antenna height, devices' antenna height, the typical clutter size, height and density, and are set in our simulations to $8$\\,m, $1.5$\\,m, $2$\\,m, $6$\\,m, and $60\\%$, respectively. The shadowing for \\gls{los} and \\gls{nlos} is assumed to follow a zero-mean log-normal distribution with standard deviation $4.3$ and $4$ in dB, respectively. In our link level simulations, we first set the position of the $4$ \\glspl{gnb}, as shown in \\figurename\\,\\ref{fig:simulationSetup}. Then, for each pair of possible device positions and the $4$ \\gls{gnb} positions, we generate uncorrelated link conditions with $\\mathrm{Pr}_{\\mathrm{LOS}}{(\\cdot)}$ for \\gls{los}, and ${1-\\mathrm{Pr}_{\\mathrm{LOS}}{(d_{\\mathrm{2D}})}}$ for \\gls{nlos}. Nevertheless, the large scale parameters are generated with correlation distance of $10$\\,m in the horizontal plane. Then, we followed the spatial consistency procedure in \\cite[\\S7.5,\\S7.6.3]{3GPP38901} to generate small scale parameters and channel coefficients, and used the parameters in \\cite[Table 7.5-6 Part-3]{3GPP38901}.\n\n\\begin{comment}\n \\begin{equation} \\label{eq:PL_shadow}\n PL \\mathrm{[dB]}{=}\\begin{cases}\n PL_{\\mathrm{LOS}}+X_\\sigma^{\\mathrm{LOS}}, & \\text{with $\\mathrm{Pr}_{\\mathrm{LOS}}$},\\\\\n PL_{\\mathrm{NLOS}}+X_\\sigma^{\\mathrm{NLOS}}, & \\text{with $1-\\mathrm{Pr}_{\\mathrm{LOS}}$},\n \\end{cases}\n \\end{equation}\n \n \\begin{equation}\\label{eq:PL_shadow2}\n PL \\mathrm{[dB]}{=}\\mathrm{Pr}_{\\mathrm{LOS}}{\\left(PL_{\\mathrm{LOS}}+X_\\sigma^{\\mathrm{LOS}}\\right)},\n \\end{equation}\n\n \\begin{equation}\\label{eq:beta}\n \\beta_c=\\sqrt{\\frac{P_c}{P_{\\mathrm{L}}X}}\n \\end{equation}\n\\end{comment}\n\\begin{figure}[t]\n\t\\centering\n\t\\includegraphics[width=.95\\columnwidth,keepaspectratio]{./Components/Figs/JSAC/sim.pdf}\n\n\t\\caption{The simulation setup.}\n\t\\label{fig:simulationSetup}\n\t\\vspace{-6mm}\n\\end{figure}\n\\subsection{Radio Network Simulator and \\gls{drl} Agent}\nThe radio network simulator is event-based, \\gls{3gpp} compliant, and operates at \\gls{ofdm} symbol resolution. We considered numerology one from \\cite{dahlman5GNr}, implying that each slot and symbol are $0.5$\\,ms and $33.33$\\,$\\mu$s long, respectively. We assumed the channel response matrix in \\eqref{eq:channel} remains constant during one slot.\nTo ensure seamless training of distributed \\gls{ai} until the end of a simulation, we considered \\gls{rlc} in \\gls{am} for distributed \\gls{ai}. Nevertheless, the \\gls{rlc} retransmissions are slow and unlikely to benefit \\gls{urllc} packets with their tight delay bounds \\cite{dahlman5GNr}. Accordingly, we configured the \\gls{rlc} in \\gls{um} for \\gls{urllc} flow. Within URLLC flow, \\gls{ul} and \\gls{dl} \\gls{urllc} traffic are scheduled based on round robin and delay, respectively (i.e., the packet that waited longer in the queue is scheduled first). Moreover, we used proportional-fair scheduling for distributed AI traffic in both directions. Nevertheless, we assumed strict priority scheduling where \\gls{urllc} flow has higher priority than \\gls{ai} flow, implying that \\gls{ai} packets cannot be scheduled unless there is no \\gls{urllc} packet in the queues.\n\nUpon transmission, one or several packets are drawn from the head of the corresponding \\gls{rlc} buffer, depending on the selected \\gls{mcs} on lower layers. 
Alternatively, \\gls{rlc} could perform segmentation of packets into smaller segments that fit into the transport blocks via which the packets are transmitted. Upon reception, the received instantaneous \\gls{sinr} of each transport block (which depends on the radio channel and the dynamic interference of other devices' transmissions) determines an error probability. Consequently, the receiving \\gls{rlc} entity reassembles successfully decoded segments and delivers them to the application layer. For the availability calculation on the application layer, we considered a \\gls{urllc} packet lost if it is not fully received before its corresponding delay bound, followed by applying $T_{\\mathrm{sv}}$ as in \\eqref{eq:availEst}, where, in the final availability distributions, $\\Delta t$ is the duration of one simulation, starting from the first action time (i.e., $t_1$) until $t_K$. \\tablename\\,\\ref{tab:simSetup} presents the simulation parameters.\n\nThe \\gls{urllc} traffic is represented by periodic \\gls{ul} and \\gls{dl} traffic, with delay bounds of $6$\\,ms and $4$\\,ms and packet sizes of $64$ bytes and $80$ bytes, respectively, both with a period of $6$\\,ms. Such \\gls{urllc} traffic characterization aligns with the machine control use case for mobile robots in \\cite{3GPP22104}.\nMotivated by \\cite{3GPP22874AiModel}, we assumed that the shared \\gls{dnn} architecture (i.e., used on the devices and the central node) follows MobileNets \\cite{mobileNets}, a class of efficient \\gls{dnn} models based on a streamlined architecture for mobile and embedded vision applications. We considered $0.25$\\,MobileNet-$224$ in \\cite{mobileNets}, implying that the \\gls{dnn} model has $0.5$ million parameters.\nTo model the distributed \\gls{ai} traffic, we assumed \\gls{fl} and $32$-bit quantization for each model parameter, implying that each model (local or global) can be represented as a packet of size $2$\\,MB. Nevertheless, our solution applies to settings where other quantization/compression approaches reduce the communication overhead \\cite{Sindri2020Linear}.\n\n\\begin{table}[t]\n\\centering\n\\caption{Simulation Parameters.}\n\\label{tab:simSetup}\n\\scalebox{0.75}{\\begin{tabular}{l||l}\n\t\\hline\n\t\\multicolumn{2}{c}{\\textbf{Radio Network Simulator Parameters}} \\\\\n\t\\hline\n\t\\textbf{Parameter}& \\textbf{Value}\\\\\n\t\\hline\n\tDeployment & $4$ \\glspl{gnb}\\\\\n\tDuplex/Carrier frequency & FDD/$2.6$\\,GHz\\\\\n\t\\gls{gnb} antenna height& $8$\\,m \\\\\n\tDevices' height &$1.5$\\,m \\\\\n\tNumber of antenna elements in \\gls{gnb}/device & $2$/$2$\\\\ \n\tBandwidth& $40$\\,MHz\\,\\\\\n\tTTI length/Subcarrier spacing& $0.5$\\,ms/$30$\\,kHz \\\\\n\tUL/DL transmit power & $0.2$\\,W/$0.5$\\,W \\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{urllc}\\,Trans. (\\gls{mac}) & $3/2$ \\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{ai}\\,Trans. (\\gls{mac}) & $10/10$\\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{ai}\\,Trans. 
(\\gls{rlc}) & $8/8$ \\\\\n\t\\gls{ul}/\\gls{dl}\\,\\gls{urllc}\\,delay bound& $6/4$\\,ms \\\\\n\t\\gls{ul}/\\gls{dl} \\gls{urllc} survival time, $T_{\\mathrm{sv}}$& $6/6$\\,ms \\\\\n\tThe total number of \\gls{ai} devices, $N$& $50$ \\\\\n\tThe required number of local models for \\eqref{eq:globalUp} to progress, $n$ & $15$\\\\\n\t$\\upsilon$/$\\zeta$ in the reward function \\eqref{eq:reward} & $0.5$/$100$\\\\\n $T^{\\max}$& $10$\\,s \\\\\n\t\\hline\n\t\\multicolumn{2}{c}{\\textbf{DRL Agent Parameters}} \\\\\n\t\\hline\n\t\\textbf{Parameter}& \\textbf{Value}\\\\\n\t\\hline\n\tDiscount factor, $\\lambda$ & $0.1$\\\\\n\tTraining mini-batch size, $|\\mathcal{B}_\\mathrm{mb}|$ & $200$\\\\\n\tReplay buffer size & $1\\,000\\,000$\\\\\n\tNeural network hidden layers (all six) & $128\\times128$\\\\\n\tPrioritized replay buffer $\\alpha$/$\\beta$ in \\cite{prioReplay} & $0.6$/$0.4$\\\\\n\tLearning rate (for critic, actor and entropy) & $0.0003$ \\\\%(for critic, actor and entropy)\n\t$\\nu$ (for smooth update) & $0.002$\\\\% (for both target actor and critic)\n\t\\hline\n\\end{tabular}}\n\\end{table}\n\nIn our simulation setup, the \\gls{drl} agent resides on a separate server and communicates with the radio network simulator via a ZeroMQ interface. We ran the \\gls{drl} agent on a server with an Intel(R) Xeon(R) Gold 6132 CPU @ 2.60\\,GHz, $8$ cores, and $64$\\,GB of RAM. In the exploration phase, we trained the \\gls{drl} agent for $7\\,000$ episodes of $50$-iteration length, and the simulation time differed depending on how long each iteration (i.e., $d_k^{\\mathrm{AI}}(\\cdot)$) took. Nevertheless, our \\gls{sac}-based algorithm converged after significantly fewer iterations, at around $150\\,000$ iterations.\n\nIn the following section, we present comprehensive simulation results to study the impact of various design parameters, including the distributed \\gls{ai} and \\gls{urllc} loads, the number of selected \\gls{ai} devices, and slicing the network resources between the \\gls{urllc} and distributed \\gls{ai} services.\n\\section{Results and Discussion}\\label{sec:preformance}\n\\begin{figure*}[t]\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/availUlDl}\n \t\\caption{}\n \\label{fig:availUrllc}\n \\end{subfigure}\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/delayBox}\n \t\\caption{}\n \\label{fig:delayBox}\n \\end{subfigure}\n\t\\caption{The empirical CDF of URLLC devices' availability, $\\hat{\\alpha}_i^\\Gamma$, in (a), and the distributed \\gls{ai} training delay, $d_k^{\\mathrm{AI}}$, in (b), both for the benchmark with semi-random \\gls{urllc} devices. Each box plot represents the minimum, $25$th percentile, median, $75$th percentile, and maximum of the training delay distribution.}\n\t\\label{fig:kpi}\n\\end{figure*}\nFor the performance evaluation of our \\gls{sac}-based algorithm (shown as \\texttt{dRlAgent} in the figures), we set up two benchmarks.\n\\begin{enumerate}\n \\item Semi-random \\gls{urllc} devices: In this benchmark, we set up $10$ \\gls{urllc} devices, and assume that each device moves in 1D at a speed of $30$\\,km/h within a short distance of a position that is maintained across simulations, while the movement direction changes randomly with different seeds.\n \\item Random \\gls{urllc} devices: In this benchmark, we set up $20$ \\gls{urllc} devices. 
At each simulation, the \\gls{urllc} devices appear at random positions and move in 1D in a random direction at a speed of $30$\\,km/h within a short distance of that position.\n\\end{enumerate}\nFor each of these benchmarks, we compared our proposed solution with three types of baselines: \n\\begin{itemize}\n \\item \\texttt{singleURLLC}: We did not set any distributed AI traffic in our industrial automation scenario and the \\gls{urllc} devices leveraged the entire $40$\\,MHz bandwidth.\n \\item \\mixs{m}: In addition to \\gls{urllc} devices, we had a total of $50$ \\gls{ai} devices. We kept the required number of devices that the central node waits for constant (i.e., $n{=}15$), and randomly picked a set of $m$ participating devices (i.e., $m_k=m, \\forall k\\in\\mathbb{N}$), where $m\\in\\{15,20,30,40,50\\}$.\n \\item \\texttt{slicing[$m$]}: We assigned $25\\%$ of resources to the \\gls{urllc} service (i.e., $10$\\,MHz bandwidth and $0.125$\\,W for total \\gls{dl} transmit power), and the rest to the distributed \\gls{ai} service. We kept $|\\mathcal{U}|$, $|\\mathcal{N}|$, $n$ the same as \\mixs{m} baseline. Besides, we randomly picked a set of $m$ participating devices (i.e., $m_k=m, \\forall k\\in\\mathbb{N}$), where $m\\in\\{15,20\\}$.\n\\end{itemize}\nThe \\texttt{singleURLLC} baseline represents the best possible performance on the \\gls{urllc} availability in our scenario. \nIn \\texttt{dRlAgent}, and to calculate the reward in \\eqref{eq:reward}, we set $\\upsilon$ and $\\zeta$ to $0.5$, and $100$, respectively.\nBesides, we assumed that all \\gls{urllc} devices serve a single use case, and thus, set the availability requirement to $0.99$ (i.e., $\\alpha^\\mathrm{req}=\\alpha_i^\\mathrm{req}=0.99, \\forall i\\in\\mathcal{U}$). \nFor \\texttt{singleURLLC} evaluations, we ran $102$-second simulations $300$ times (with different seeds). In addition, \\texttt{dRlAgent}, \\mixs{m}, and \\texttt{slicing[$m$]} were evaluated with $300$ simulations of $50$-iteration length, resulting in different simulation lengths. Note that there is no progress in distributed AI if all $n$ local models are not collected by the central node within a time duration of $T^{\\max}$, and thus, \\gls{sac} iterations could differ from distributed AI iterations. Regardless, if there is no strict latency constraint from distributed \\gls{ai} task, we can tune $T^{\\max}$ sufficiently large to ensure that time-outs happen rarely. Although our radio network simulator can handle such situations, and for the sake of fair comparison, we set $T^{\\max}{=}10$\\,s. \n\n\\subsection{Semi-random URLLC Devices}\n\\figurename\\,\\ref{fig:kpi} shows the distribution of our main \\glspl{kpi}. \\figurename\\,\\ref{fig:availUrllc} illustrates the empirical \\gls{cdf} of \\gls{urllc} devices' availability, where each sample is the \\gls{ul} or \\gls{dl} availability of one \\gls{urllc} device in one simulation during its whole simulation time. In \\texttt{slicing[$m$]}, due to slicing of the bandwidth and \\gls{gnb} transmission power, the availability distribution is identical for any arbitrary $m$.\nAs this figure shows, compared to \\texttt{singleURLLC} and \\texttt{slicing[$m$]}, the availability of the \\gls{urllc} devices decreases in \\mixs{15} and \\mixs{20}, likely, because of the introduced interference by AI devices in the neighboring cells. 
Although the scheduler adjusts the \\gls{mcs} to deal with this additional interference\\footnote{Note that such decrease in availability occurs regardless of the scheduler configuration. For example, higher target block error rate cannot overcome the extra interference, and lower target block error rate leads to extra delay, both resulting in lower availability.}, it still affects the availability, such that the availability requirement of $0.99$ can be met with sensitivity of around $0.1$ in \\mixs{15} and \\mixs{20}, rather than $0.012$ in \\texttt{singleURLLC} and $0.014$ in \\texttt{slicing[$m$]} (see \\eqref{eq:availReq}). In \\mixs{m} baselines, most of the availability samples are still greater than or equal to $0.98$. Unlike many conventional services, such decrease is not acceptable for \\gls{urllc} service.\nDespite the impact of introducing the large load of the distributed \\gls{ai} service, we observe that our \\texttt{dRlAgent} solution keeps the \\gls{urllc} devices' availability close to the \\texttt{singleURLLC} and \\texttt{slicing[$m$]} up to $\\alpha^\\mathrm{req}$, and can support the availability requirement of $0.99$ with sensitivity of $0.013$.\n\n\\begin{figure*}[t]\n \\begin{subfigure}{0.49\\textwidth}\n\t\\centering\n \t\\input{Components/Figs/JSAC/numberOfSelDev_PMF}\n \t\\caption{}\n \\label{fig:PMF}\n \\end{subfigure}\n \\begin{subfigure}{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/deviceParticRatio}\n \t\\caption{}\n \\label{fig:selRatio}\n \\end{subfigure}\n\t\\caption{The empirical probability mass function of the number of (a) selected devices, and (b) \\gls{ai} device participation ratio, both for \\texttt{dRlAgent}.}\n\t\\label{fig:dRlAgentPerf}\n\\end{figure*}\n\n\\figurename\\,\\ref{fig:delayBox} depicts the distributed \\gls{ai} training delay. Each box shows the minimum, $25$th percentile, median, $75$th percentile, and the maximum of the observed training delay samples. In general, as \\texttt{slicing[$m$]} and \\mixs{m} boxes, and our results in \\cite{ganjInterplay} suggest, the training delay grows as the number of selected devices increases. However, it is more likely for central node to wait excessively for stragglers, and thus reach time-out when $m{=}n$ (i.e., in both \\texttt{slicing[$15$]} and \\mixs{15}). Moreover, the lower training delay statistics in \\mixs{15} and \\mixs{20} than \\texttt{slicing[$15$]} and \\texttt{slicing[$20$]}, respectively, suggests that distributed \\gls{ai} service in \\mixs{m} generally consumes more resources than the allocated resources in \\texttt{slicing[$m$]}.\nAs this figure indicates, compared to the most competitive baseline (i.e., \\mixs{15}), our \\texttt{dRlAgent} decreases the median training delay by $36\\%$, while the maximum observed training delay is $2.6$\\,s, i.e., $43\\%$ less than \\mixs{20}, which has the lowest maximum observed training delay among the baselines.\n\n\\figurename\\,\\ref{fig:dRlAgentPerf} demonstrates the device selection policy in the evaluation phase, $\\bm{\\pi}_k^\\mathrm{u}$, for \\texttt{dRlAgent}. \\figurename\\,\\ref{fig:PMF} shows the empirical \\gls{pmf} of the number of selected \\gls{ai} devices, $m$ for different iterations. As this figure indicates, given that we set $n{=}15$ in all of our evaluations, our \\gls{sac}-based solution selected at least $1$ extra \\gls{ai} device for more than $40\\%$ of iterations. 
Such selection of extra devices implies that our device selection solution could still leverage the diversity introduced by extra \\gls{ai} devices, even in our bandwidth-limited deployment. Carefully selected extra devices reduce the sensitivity to the straggler problem and therefore reduce the overall latency without substantial impact on the interference footprint. \n\\figurename\\,\\ref{fig:selRatio} represents the device selection ratio for each \\gls{ai} device that is the number of times an \\gls{ai} device is selected by the \\gls{drl} agent divided by the total number of iterations in the evaluation phase. According to \\figurename\\,\\ref{fig:selRatio}, $9$ \\gls{ai} devices are selected in more than $90\\%$ of the iterations. Also, $24\\%$ of the \\gls{ai} devices are selected with a ratio of $0.1$ to $0.9$. \n\n\\subsection{Random URLLC Devices}\n\\figurename\\,\\ref{fig:kpiRand} illustrates the distribution of availability for \\gls{urllc} devices (in \\figurename\\,\\ref{fig:availUrllcRand}) and training delay of distributed \\gls{ai} service (in \\figurename\\,\\ref{fig:delayBoxRand}). As \\figurename\\,\\ref{fig:availUrllcRand} shows, up to $\\alpha^{\\mathrm{req}}$, \\texttt{dRlAgent} keeps the availability of \\gls{ai} devices close to \\texttt{singleURLLC}, even though the \\gls{urllc} traffic appears at random locations in different seeds.\nSurprisingly, \\texttt{slicing[$m$]} can support $0.99$ availability requirement with $\\gamma{=}0.02$, which is more than twice the sensitivity \\texttt{singleURLLC} and \\texttt{dRlAgent} can support. Compared to the semi-random benchmark, in this benchmark, the number of \\gls{urllc} devices is doubled and they can appear in any part of the factory. Hence, \\texttt{slicing[$m$]} might associate a large number of devices to the same \\gls{gnb}, contributing to lower availability performance with $10$\\,MHz bandwidth and $0.125$\\,W maximum \\gls{dl} transmission power for \\gls{urllc} devices. In \\figurename\\,\\ref{fig:delayBoxRand}, as expected, the delay distribution of \\texttt{slicing[$m$]}, for $m{\\in}\\{10,15\\}$, does not show any significant difference with \\figurename\\,\\ref{fig:delayBox}, and we observe a slight raise in overall training delay distribution of \\mixs{m} for $m{\\in}\\{10,15\\}$ (in the order of tens of milliseconds).\nNevertheless, our \\texttt{dRlAgent} improves the median training delay at least by $30\\%$ (compared to \\mixs{15}), and decreases the maximum observed training delay at least by $15\\%$ (compared to \\mixs{20}). Therefore, even in fully-random \\gls{urllc} devices benchmark, our \\texttt{dRlAgent} successfully orchestrates the distributed \\gls{ai} traffic such that i) the impact on the availability of the \\gls{urllc} devices, given their requirement, is negligible, and ii) it reaches lower training delay statistics.\n \n\\begin{figure*}[t]\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/availUlDlRand}\n \t\\caption{}\n \\label{fig:availUrllcRand}\n \\end{subfigure}\n \n \\begin{subfigure}[t]{0.48\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/delayBoxRand}\n \t\\caption{}\n \\label{fig:delayBoxRand}\n \\end{subfigure}\n\t\\caption{The empirical CDF of URLLC devices' availability, $\\hat{\\alpha}_i^\\Gamma$, in (a), and the distributed \\gls{ai} training delay, $d_k^{\\mathrm{AI}}$ in (b), both for the benchmark with random \\gls{urllc} devices. 
Each box plot represents the minimum, $25$th percentile, median, $75$th percentile, and maximum of the observed training delay samples.}\n\t\\label{fig:kpiRand}\n\\end{figure*}\n\\section{Conclusions}\\label{sec:conculsions}\nIn this paper, we investigated the performance optimization of distributed \\gls{ai} when it coexists with a \\gls{urllc} service with stringent operational requirements. We proposed a \\gls{drl}-powered framework to run distributed AI using a carefully selected subset of devices with the objective of minimizing the AI training delay while maintaining the \\gls{urllc} communication service availability requirements. Our comprehensive \\gls{3gpp}-compliant 5G simulations indicate that our scheme can significantly decrease the total training delay while keeping URLLC devices' availability near that of the single-service scenario (i.e., when all network resources are allocated to the URLLC devices). This paper provides useful insights on how to adaptively control \\gls{ai} traffic (via device selection) to ensure a sustainable coexistence between distributed \\gls{ai} and \\gls{urllc}.\n\nAn alternative approach to control the load of distributed \\gls{ai} is through quantization of the exchanged messages. Such approaches often reduce the communication overhead per iteration (and thereby the interference footprint) at the expense of some extra iterations to reach convergence. Potential future work is to develop novel approaches that adaptively change the quantization level based on not only the distributed AI algorithm but also the URLLC load and the interference footprint of the network.\n{\\appendices\n\\section{Example \\ref{ex1} Showcase} \\label{app:1}\nTowards solving the distributed optimization problem \\eqref{eq: MainOptimProblem}, the central node performs the gradient descent update at each iteration $k$ as\n\\begin{equation}\\label{eq:algUpdate}\n\\mbox {\\boldmath $w$}_{k+1} = \\mbox {\\boldmath $w$}_k - \\frac{\\eta_k}{n}\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)},\n\\end{equation}\nwhere the right-hand side of the equation represents $A{(\\cdot)}$ in \\eqref{eq:globalUp}, $\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}$ is the noisy estimate of the true gradient at the $i$th \\gls{ai} device, and $\\mathcal{N}_{n,k}$ is the set of \\gls{ai} devices from which the central node received the first $n$ local updates at the $k$th iteration. We assume that each \\gls{ai} device employs mini-batch gradient descent, and, to simplify our notation, the mini-batch size is assumed to be the same for all devices in all iterations. Hence, the overall gradient estimate using the local estimates of the \\gls{ai} devices can be obtained as\n\\begin{equation}\\label{eq:noisyGradient}\n\\frac{1}{n}\\!\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)} \\coloneq \\nabla f{(\\mbox {\\boldmath $w$}_k)}+\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)},\n\\end{equation}\nwhere $\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)} \\coloneq \\frac{1}{n}\\!\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\mbox {\\boldmath $e$}_{i,k}$, and $\\mbox {\\boldmath $e$}_{i,k}$ is the residual term of the $i$th device's estimate at the $k$th iteration, while $\\nabla f{(\\mbox {\\boldmath $w$}_k)}$ is the true gradient (i.e., the gradient used by batch gradient descent in centralized training). 
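For concreteness, a minimal sketch of the $n$-sync update in \\eqref{eq:algUpdate} is given below; it is an illustration only (not part of our simulator), and representing each local update as an (arrival time, gradient) pair is a simplifying assumption.\n\\begin{verbatim}\nimport numpy as np\n\ndef n_sync_update(w, local_updates, n, lr):\n    # local_updates: (arrival_time, gradient) pairs reported by the m_k\n    # requested AI devices; only the first n arrivals are averaged,\n    # mirroring the n-sync global update.\n    first_n = sorted(local_updates, key=lambda u: u[0])[:n]\n    avg_grad = np.mean([g for _, g in first_n], axis=0)\n    return w - lr * avg_grad\n\\end{verbatim}\n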
Let us make the following assumptions.\n\\begin{assumption}\\label{as:1}\n\\cite{Bottou2018SIAM,flConvergNoniid} The objective functions $f_i$, $\\forall i\\in\\mathcal{N}$, are all $L$-smooth, with Lipschitz constant $L>0$.\n\\end{assumption}\n\n\\begin{assumption}\\label{as:2}\n\\cite{Bottou2018SIAM,flConvergNoniid} The objective functions $f_i$, $\\forall i\\in\\mathcal{N}$, are all strongly convex, with constant $\\mu>0$.\n\\end{assumption}\n\n\\begin{assumption}\\label{as:3}\n\\cite{Bottou2018SIAM} There exist constants $\\beta_2\\geq (\\beta_1+1)^2 > 0$ such that, for all $k\\in \\left[K\\right]$ and all $i\\in \\mathcal{N}$, the objective function $f(\\mbox {\\boldmath $w$})$ and the \\gls{dgd} algorithm satisfy the following bounds:\n\\begin{subequations}\\label{eq:momentLimits}\n\\begin{align}\n & \\nabla f{(\\mbox {\\boldmath $w$}_k)}^\\intercal \\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]}\\geq \\beta_1 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2 , \\label{eq:subspace}\\\\\n & \\nabla f{(\\mbox {\\boldmath $w$}_k)}^\\intercal \\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]} \\leq \\beta_2 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2. \\label{eq:upperBoundLim}\n\\end{align}\n\\end{subequations}\nIt is worth mentioning that \\eqref{eq:subspace} implies that the noisy gradient estimate lies in the same half-space as the true gradient, and \\eqref{eq:upperBoundLim} is a weaker assumption than bounded variance of $\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}/n$, as it only bounds this term by the true gradient, $\\nabla f(\\mbox {\\boldmath $w$}_k)$.\n\\end{assumption}\n\\begin{assumption}\\label{as:4}\n\\cite{Bottou2018SIAM,flConvergNoniid} The gradient estimation error in each device has a bounded second moment, i.e.,\n\\begin{equation}\\label{eq:sigma}\n \\mathbb{E}{\\left[\\norm{\\mbox {\\boldmath $e$}_{i,k}}_2^2\\right]}\\leq \\sigma^2, \\forall k\\in \\left[K\\right], \\forall i\\in \\mathcal{N}.\n\\end{equation}\nSince $\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)}$ is an unbiased estimator of $\\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]}$, we have\n\\begin{equation}\n \\mathbb{E}{\\left[\\norm{\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)}}_2^2\\right]}\\leq \\frac{\\sigma^2}{n}, \\forall k\\in \\left[K\\right].\n\\end{equation}\nIt is worth noting that \\eqref{eq:upperBoundLim} and \\eqref{eq:sigma} result in \n\\begin{equation}\\label{eq:sgdVarComb}\n \\mathbb{E}{\\left[\\norm{\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]}\\leq\\sigma^2+\\beta_2 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2.\n\\end{equation}\n\\end{assumption}\nThen, if Assumptions~\\ref{as:1}-\\ref{as:4} hold, for a fixed learning rate $\\eta$ satisfying $0<\\eta\\leq\\frac{\\beta_1+1}{(2\\beta_2+1)L}$, we have \\cite[Theorem 4.6]{Bottou2018SIAM}\n\\begin{multline}\\label{eq:convexConverge}\n\\mathbb{E}{\\left[f{\\left(\\mbox {\\boldmath $w$}_k\\right)} {-} f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}\\right]} \\leq \\frac{\\eta L \\sigma^2}{2n\\beta_1 \\mu} +\\\\ \n{\\left(1{-}\\eta \\beta_1 \\mu\\right)^{k-1}}{\\left(f{\\left(\\mbox {\\boldmath $w$}_1\\right)}{-}f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}{-}\\frac{\\eta L\\sigma^2}{2n\\beta_1 \\mu}\\right)},\n\\end{multline}\nwhere the first term represents the gap to the expected optimal value that \\gls{dgd} converges to when $k{\\to} \\infty$ for a fixed learning rate, and the second term captures the convergence rate. 
Using the learning rate bound, Assumption~\\ref{as:3}, and the fact that $\\mu {\\leq} L$ (as a result of Assumptions~\\ref{as:1} and \\ref{as:2}), we can derive that $\\eta \\beta_1 \\mu < 1$, and hence, $(1-\\eta \\beta_1 \\mu)$ is a contraction factor.\n\nLet us assume that our initial point lies within a bounded region with respect to the point we converge to (i.e., the last parenthesized term in \\eqref{eq:convexConverge} is less than or equal to $W^{\\mathrm{A}}$). Note that the additional term of $\\eta L \\sigma^2/\\left(2n\\beta_1 \\mu\\right)$ reflects that \\gls{dgd} cannot converge to the optimal value, but instead, to a neighborhood of $f(\\mbox {\\boldmath $w$}^\\star)$.\nThen, the minimum required number of iterations, $K_{\\min}$, to reach $\\epsilon$-accuracy becomes\n\\begin{align}\\label{eq:kConvex}\nK_{\\min} \\geq \\log_{{\\left(1{-}\\eta \\beta_1 \\mu\\right)}}&{\\left(\\epsilon{-}\\frac{\\eta L\\sigma^2}{2n\\beta_1 \\mu}\\right)} - \\log_{{\\left(1{-}\\eta \\beta_1 \\mu\\right)}}\\!\\!{\\left({W^{\\mathrm{A}}}\\right)} +1.\n\\end{align}\nThen, \\eqref{eq:kConvex} can be simplified as\n\\begin{equation}\\label{eq:kConvexSimpleApp}\nK_{\\min} \\geq \\log_{b}{\\left(\\frac{W^{\\mathrm{A}}}{\\epsilon{-}\\frac{z^{\\mathrm{A}}}{n}}\\right)} +1,\n\\end{equation}\nwhere $b\\coloneq1/\\left(1{-}\\eta \\beta_1 \\mu\\right)>1$, and $z^{\\mathrm{A}}$ is a positive constant which depends on the learning rate, Lipschitz constant, strong convexity, and the error in the gradient estimates for $n{=}1$.\n\n\\begin{comment}\n \\begin{equation} \\label{eq:kprop}\n K\\propto \\log{\\left(\\epsilon-\\frac{z}{n}\\right)}. \n \\end{equation}\n\\end{comment}\n\\section{Example \\ref{ex2} Showcase}\\label{app:2}\n\\begin{assumption}\\label{as:5}\n\\cite{Bottou2018SIAM} The objective functions $f_i$, $\\forall i \\in \\mathcal{N}$, are lower bounded by a scalar $f_{\\mathrm{inf}}$ for all sequences of $\\mbox {\\boldmath $w$}_k$.\n\\end{assumption}\nThe non-convex objective functions may contain several local minima and other stationary points. Therefore, we define the convergence criterion on the gradient norm.\nThen, if Assumptions~\\ref{as:1}, \\ref{as:3}-\\ref{as:5} hold, for a fixed learning rate satisfying $0<\\eta\\leq \\frac{\\beta_1+1}{L(2\\beta_2+1)}$, we have \\cite[Theorem 4.8]{Bottou2018SIAM}\n\\begin{equation}\\label{eq:nonConvexIneq}\n\\mathbb{E}{\\left[\\frac{1}{K}\\!\\sum_{k=1}^K \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]} \\leq \\frac{\\eta L \\sigma^2}{n{(\\beta_1{+}1)}} {+} \\frac{2\\left(f{(\\mbox {\\boldmath $w$}_1)}{-}f_{\\mathrm{inf}}\\right)}{\\eta{(\\beta_1{+}1)}K}.\n\\end{equation}\nTo understand \\eqref{eq:nonConvexIneq}, consider centralized training with batch gradient descent, where there is no gradient noise and $\\sigma^2$ is zero, resulting in $\\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2\\to0$ as $K$ grows. However, in \\gls{dgd}, the average squared norm of the gradients only converges to a neighborhood of size ${\\eta L \\sigma^2}/\\left(n(\\beta_1{+}1)\\right)$.\nNow, the required number of iterations, $K_{\\min}$, to reach $\\epsilon$-accuracy becomes\n\\begin{equation}\\label{eq:nonConvexKmin}\nK_{\\min}\\geq \\frac{2\\left(f{(\\mbox {\\boldmath $w$}_1)}-f_{\\mathrm{inf}}\\right)}{\\eta{(\\beta_1{+}1)}\\left(\\epsilon{-}\\frac{\\eta L \\sigma^2}{n\\left(\\beta_1{+}1\\right)}\\right)}.\n\\end{equation}\nIn \\eqref{eq:nonConvexKmin}, we observe that tightening the convergence criterion (i.e., decreasing $\\epsilon$) increases the required number of iterations to reach $\\epsilon$-accuracy. 
We can simplify \\eqref{eq:nonConvexKmin} as\n\\begin{equation}\\label{eq:kNonConvexSimpleApp}\nK_{\\min}\\geq \\frac{W^\\mathrm{B}}{\\epsilon{-}\\frac{z^\\mathrm{B}}{n}},\n\\end{equation}\nwhere $W^\\mathrm{B} {\\coloneq} 2\\left(f{(\\mbox {\\boldmath $w$}_1)}{-}f_{\\mathrm{inf}}\\right)/\\eta{(\\beta_1{+}1)}$, and $z^\\mathrm{B}$ is a function of the learning rate, Lipschitz constant, and error in the gradient estimates when $n{=}1$. Note that $\\epsilon$ should be set to a value that is larger than the neighborhood \\gls{dgd} can potentially converge to (i.e., $z^\\mathrm{B}$).\n\\section{Example \\ref{ex3} Showcase}\\label{app:3}\nThere are two main differences between \\gls{fl} and \\gls{dgd}: \ni) there can be several local iterations in each \\gls{ai} device between two communication rounds, \nand ii) the model parameters (i.e., the weights of the \\glspl{dnn}) are communicated, rather than the gradients as in \\gls{dgd}. Hence, in its local update, each \\gls{ai} device performs \\eqref{eq:algUpdate} $E$ times before the global iteration $k$ is updated, as in \\eqref{eq:localUp}. \n\n\\begin{assumption}\\label{as:6}\n\\cite{flConvergNoniid} The expected squared norm of the gradient estimates in each \\gls{ai} device is bounded, i.e.,\n\\begin{equation}\\label{eq:boundedGradient}\n \\mathbb{E}{\\left[\\norm{\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]}\\leq G^2.\n\\end{equation}\nNote that \\eqref{eq:boundedGradient} is a stricter assumption than Assumptions~\\ref{as:3} and \\ref{as:4} combined, as shown in \\eqref{eq:sgdVarComb}.\n\\end{assumption}\n\nIf Assumptions~\\ref{as:1}, \\ref{as:2}, \\ref{as:4}, and \\ref{as:6} hold, and $n$ \\gls{ai} devices are selected uniformly at each iteration, then for a diminishing learning rate $\\eta_k = 2/\\left(\\mu{(\\xi+k+\\kappa)}\\right)$, where $\\kappa({\\in} {[E{-}1]})$ is the local iteration number and $\\xi \\coloneq \\max\\left\\{8L/\\mu,E\\right\\}$, the following inequality holds \\cite[Theorem 3]{flConvergNoniid}:\n\\begin{equation}\\label{eq:flConvergence}\n\\mathbb{E}{\\left[f{\\left(\\mbox {\\boldmath $w$}_k\\right)} {-} f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}\\right]}\\leq \\frac{2L{\\left(\\frac{\\sigma^2}{N}{+}8{(E{-}1)^2}{+} \\rho E^2G^2{+}\\xi G^2\\right)}}{\\mu^2{(\\xi{+}k{+}\\kappa{-}1)}},\n\\end{equation}\nwhere $\\rho\\coloneq\\frac{4{(N-n)}}{n{(N-1)}}$. 
Hence, the minimum number of global iterations (i.e., rounds of communications) to attain $\\epsilon$-accuracy approximately becomes \\cite{flConvergNoniid}\n\\begin{equation}\\label{eq:flEpsAccuracyApp}\nK_{\\min} \\propto \\frac{1}{\\epsilon}\\left[\\left(1+\\frac{1}{n}\\right)EG^2 + \\frac{\\frac{\\sigma^2}{N}+G^2}{E}+G^2\\right],\n\\end{equation}\nwhere we assumed $\\xi=\\mathcal{O}{(1+E)}$.\n}\n\\ifCLASSOPTIONcaptionsoff\n\\fi\n\n\n\\Urlmuskip=0mu plus 1mu\\relax\n\\bibliographystyle{IEEEtran}\n\\vspace{-2mm}\n\n\n\n\n\n\n\n\n\n\\section*{Reviewer \\thereviewer}}\n\n\\newenvironment{point}\n {\\refstepcounter{point} \\bigskip \\noindent {\\textbf{Comment~\\thepoint} }\\ \\par\\itshape}\n\t{\\par}\n\n\n\\definecolor{mycolor1}{rgb}{0.29, 0.59, 0.82}%\n\\definecolor{mycolor2}{rgb}{1.0, 0.4, 0.6}\n\\definecolor{mycolor3}{rgb}{0.92900,0.69400,0.12500}%\n\\definecolor{mycolor4}{rgb}{0.71,0.49,0.86}\n\\definecolor{mycolor5}{rgb}{0.12, 0.3, 0.17}\n\\definecolor{mycolor6}{rgb}{0.43, 0.21, 0.1}\n\\definecolor{mycolor7}{rgb}{0.52, 0.73, 0.4}\n\\definecolor{mycolor8}{rgb}{0.98, 0.38, 0.5}\n\\definecolor{mycolor9}{rgb}{0.85, 0.44, 0.84}\n\\definecolor{vColor}{rgb}{0.12, 0.3, 0.17}\n\t\n\n\n\\section{Introduction}\\label{sec:intro}\nFuture 6G networks are envisioned as an unprecedented evolution from connected things to connected intelligence, thereby serving as the backbone of a cyber-physical world with the integration of connected devices, intelligence, and humans \\cite{surveyEdgeShi}. Numerous 6G \\gls{ai} applications have emerged recently to improve efficiency and system performance in many vertical sectors, such as industrial automation \\cite{dAIFaultDetectFactory}, autonomous driving \\cite{li2019federated}, and enhanced mobile broadband \\cite{ganjCascaded}. Promising performance gains of \\gls{ai} models come with the significant training workload relying on the massive amount of data collected by the edge devices. Centralized training of the models can be impractical in many wireless communication applications because of i) the distributed nature of the data generated/collected by mobile devices, ii) privacy concerns of sharing the local data with a central server, especially when the computational server is managed by a third party operator, and iii) limited wireless resources (in terms of bandwidth and power). Therefore, privacy-preserving distributed \\gls{ai} techniques have become the cornerstone of recent advancements in \\gls{ai} applications over wireless networks. In most distributed training algorithms, a set of \ndevices upload their local updates (e.g., local gradients in \\gls{dgd} \\cite{duttaKsync}, or local model updates in \\gls{fl} \\cite{fl}) via an \\gls{ul} channel to a central node (or a set of nodes) that maintains global parameters and orchestrates the iterations of the distributed training algorithm. Once the central node updates the global model, it shares it with the devices over a \\gls{dl} channel for the next iteration. \n\nTo highlight the complex interplay between the \\gls{ai} workflow and the underlying communication services, we note that the performance of distributed learning algorithms is affected by the errors and random delays in \\gls{ul}/\\gls{dl} transmissions as well as the AI parameters (e.g., model size, data quality, and training algorithm)~\\cite{saadConvergOpt,chen2021Survey,ganjInterplay}. 
More specifically, the wall-clock convergence time of distributed training algorithms depend on i) the time delay of every iteration (e.g., the amount of time in which global model parameters are transmitted to the devices, trained locally, and transmitted back to the central node), and ii) the number of iterations. The former is not only a function of the model and data sizes, but also the quality of the wireless channel between the central node and individual computational devices. The general perception is that increasing the \\gls{ai} model size improves the training accuracy \\cite{mobileNets}, given enough data samples and a proper training approach that reduces over-fitting. However, using a larger \\gls{ai} model also means longer communication and computation time, resulting in a potentially higher convergence time \\cite{saadConvergOpt, khaledJsacFLConvergWireless}. Higher AI communication overheads may also be detrimental for other communication services running in parallel to the AI. The tighter the requirements of the underlying service, the harder to design smooth coexistence.\n\n\\Gls{urllc} is characterized by strict requirements in terms of latency, which could be as short as $500$\\,$\\mu$s, and availability, which could be as high as $99.9999999$ \\cite{3GPP22104}. In \\cite{3GPP22104,3GPP22261}, \\gls{3gpp} defines \\textit{communication service availability} as the mean proportion of time during which the communication service meets the required \\gls{qos} of the application it serves. Regarded as the most challenging use case in 5G and beyond 5G, this type of service is supposed to enable challenging applications (e.g., factory automation or autonomous intelligent transport systems \\cite{urllcApp}) that have not been feasible in preceding generations of wireless communication systems.\n\\subsection{Uniqueness of Coexistence of \\gls{urllc} and Distributed \\gls{ai}}\nThere exist a rich literature when it comes to conventional mixed services between \\gls{urllc} and \\gls{embb} (e.g., \\cite{urllcEmbbTransCoex,anand2020jointUrllcEmbb}), \\gls{urllc} and \\gls{mmtc} (e.g., \\cite{uavUrllcMmtc}), or even all three together (e.g., \\cite{popovski20185gUrllcEmbbMmtc}). Notice that there are two main fundamental differences between distributed \\gls{ai} services and other traditional communication traffic. First, the performance of these conventional services is characterized by the statistics of a communication metric (e.g., throughput or energy consumption for \\gls{embb} and \\gls{mmtc}, respectively). Nevertheless, distributed training service is an iterative and collaborative task aiming to solve an optimization problem as quickly as possible. Hence, the performance may not be affected only by the transmission characteristics of a single device. The potential statistical correlation of data of various devices allows distributed learning to operate using a carefully chosen subset of devices. Moreover, higher quality updates from few devices at each iteration may be more beneficial than lower quality updates from many devices \\cite{denizUpdateAware}, showing that sum throughput is not the right metric due to the dependence among data across different devices. 
Second, in addition to well-known decision parameters for other communication services, distributed AI has a unique set of decision variables such as the model size, choice of algorithm, and selection of devices participating in the training task.\nHowever, to the best of our knowledge, no literature exists on the coexistence of distributed \\gls{ai} and \\gls{urllc} services in which both communication and learning aspects are jointly considered.\nDespite the lack of such analysis, numerous 6G \\gls{ai} applications have emerged recently to improve efficiency and system performance in various cyber-physical control applications such as industrial automation \\cite{dAIFaultDetectFactory}, and autonomous driving \\cite{vehicularFlSaad}. We believe the adoption and success of such applications depend highly on the analysis and optimization of these scenarios with existing \\gls{urllc} services, which serves as the main motivation of our work.\n\n\\subsection{Distributed \\gls{ai} over Wireless Networks}\nThe main challenges of running distributed training over wireless networks arise from its two main characteristics: i) the dynamics of wireless propagation, which depends on various factors in the network, such as noise, interference, and fading, and ii) resource (e.g., bandwidth and transmit power) scarcity \\cite{chen2021Survey}. The latter becomes even more significant since distributed training requires many iterations exchanging typically large models or model parameters \\cite{chen2021Survey}. From the communication service perspective, several recent studies have focused on resource management and device selection techniques. References \\cite{jsacPoorLowLatencyFL,optFlIIoT,chenJointLearningCommFL,dinhFEDL,denizUpdateAware} leverage resource management and, more explicitly, device selection to improve the performance of distributed learning in terms of training loss or convergence time. For example, in \\cite{chenJointLearningCommFL}, the authors evaluated the effects of resource management, device selection, and transmit power on \\gls{fl} convergence and optimized these wireless parameters to reduce \\gls{fl} training loss. Reference \\cite{dinhFEDL} proposes an \\gls{fl} algorithm that can handle heterogeneous user data assuming strong convex and smooth loss functions. The resource allocation is then optimized to improve the convergence of the proposed algorithm. In \\cite{denizUpdateAware}, it is shown that the performance of distributed learning can be improved if the device selection algorithm jointly considers the updates from the devices with their channel conditions. Nevertheless, none of these works address the mixed service scenario where the distributed learning performance is determined not only by the wireless network characteristics and limitations but also by the demands on the higher priority service.\n\n\\begin{comment}\n\\subsection{Potential Extra Related Works}\n \\red{References: \n\\begin{itemize}\n \\item \\cite{jsacPoorLowLatencyFL}: Joint device selection and channel assignment to minimize the training delay subject to participation ratio which is dependent on the clients' differential privacy and learning performance. The problem is addressed by multi-agent multiarmed bandit (MAMAB) framework.\n \\item \\cite{saadConvergOpt}: Joint resource allocation and device selection scheme to jointly minimize the FL convergence time and the FL training loss. 
They proposed a probabilistic device selection scheme that allows the users whose local FL models have large impacts on the global FL model to associate with the BS with high probability.\n \\item \\cite{khaledJsacFLConvergWireless}: They evaluate the convergence rate of FL regarding the joint impact of communication and training. Combining it with the network model, they formulate the optimal scheduling problem for FL implementation for \\gls{iid}.\n \\item \\cite{jsacAdaptiveFL}, \\cite{chenJointLearningCommFL}, and \\cite{iccClientSelJapan} (FedCS: Heuristic to maximize the number of participants while keeping training delay under a threshold).\n \\item from a practical perspective, the time that it takes to change the slice size should be considered (for global optimizations), and per-gNB orchestration is not sufficient.\n\\end{itemize}}\n\\end{comment}\n\\subsection{Contributions}\nAs distinct services, the performance of both \\gls{urllc} and distributed \\gls{ai} over wireless networks has been widely investigated in the existing literature. However, the coexistence of \\gls{urllc}, with its stringent requirements, and the distributed \\gls{ai} workflow, with its unique traffic model and performance characteristics, has not yet been discussed in the literature. Such coexistence introduces new fundamental challenges as well as unique trade-offs between \\gls{urllc} latency and availability on the one hand, and convergence time and accuracy of distributed \\gls{ai} on the other hand. When running distributed \\gls{ai} training over computing clusters, under ideal processing and communication assumptions, increasing the model size and/or the number of participating \\gls{ai} devices at each iteration will often lead to better convergence. \nHowever, when distributed learning is run over a set of wireless devices, because of interference and limited available bandwidth, various parameters (e.g., \\gls{sinr}, \\gls{per}, and queuing delays) might impact the training delay to the extent that the improvement in convergence rate may become marginal.\nWhen \\gls{urllc} comes into the picture, the performance is constrained by two more factors: competition with higher-priority traffic and the \\gls{urllc} availability requirements. The latter is use-case specific and, depending on the strictness of the requirements and the use case's sensitivity to them, increasing the model size and/or the number of participating \\gls{ai} devices can have a non-linear detrimental effect on the distributed AI performance.\n\nIn this paper, we focus on understanding and optimizing the coexistence of distributed learning and \\gls{urllc} services. We introduce a soft synchronous distributed learning protocol in which the central node broadcasts the global model updates upon receiving local updates from a subset of the available devices. Then, leveraging \\gls{drl}, we develop a framework to dynamically select a set of participating \\gls{ai} devices in order to minimize the convergence time of the distributed learning task while maintaining the strict \\gls{urllc} availability requirements. 
\n\\textit{To the best of our knowledge, this is the first work that comprehensively studies and optimizes the underlying trade-offs between the \\gls{urllc} performance and the distributed \\gls{ai} workflow, simultaneously running on the same wireless network.} In summary, our contributions are as follows:\n\n\\begin{itemize}\n \\item We develop a model for the operational metrics of the \\gls{urllc} service (i.e., communication service availability) and the essential parameters that characterize the \\gls{ai} training workflow (i.e., training delay, model size, convergence, and accuracy) and investigate the interplay between them. Since the system is resource-limited (in terms of bandwidth and transmission power) and the \\gls{urllc} availability requirements are strict, a subset of devices must be selected to perform each iteration of distributed \\gls{ai}. Accordingly, we formulate an optimization problem that minimizes the average training latency of the distributed \\gls{ai} to reach $\\epsilon$-accuracy while sustaining \\gls{urllc}'s communication service availability requirements.\n \n \n \\item We transform the formulated coexistence optimization problem into a \\gls{mdp} and design an action masking technique to put a lower bound on the minimum number of \\gls{ai} devices required to participate in each iteration of distributed training. However, the solution may select a higher number of devices than this minimum to address the so-called \\textit{straggling effect}.\n \n \\item To deal with the unknown dynamics of our complex cellular system, we propose a data-driven approach that optimizes the device selection policy via a state-of-the-art off-policy \\gls{drl} algorithm, namely \\gls{sac}. In our scheme, the device selection policy of each \\gls{ai} device is distributed across distinct neurons, resulting in a linear increase in the \\gls{sac} output size with the number of devices.\n \n \\item We evaluate our framework utilizing a \\gls{3gpp}-compliant 5G simulator in a factory automation use case. We observe that the number of participating \\gls{ai} devices can significantly impact the performance of \\gls{urllc}. Our results provide important insights for the ongoing standardization activities of distributed \\gls{ai}.\n\\end{itemize}\n\nThe rest of this paper is organized as follows. We provide the necessary preliminaries on distributed learning and our system model in Section~\\ref{sec:Background} and Section~\\ref{sec:systemModel}, respectively. We then formulate the problem in Section~\\ref{sec:PM}. Section~\\ref{sec:solution} presents the proposed \\gls{mdp} modeling and \\gls{drl}-based device selection.\nWe describe our simulation methodology in Section~\\ref{sec:simulation}, and discuss the results in Section~\\ref{sec:preformance}. Finally, Section~\\ref{sec:conculsions} concludes the paper.\n\n\n\\textit{Notations:} \nNormal font $x$ or $X$, bold lowercase font $\\bm{x}$, bold uppercase font $\\bm{X}$, and uppercase calligraphic font $\\mathcal{X}$ denote scalars, vectors, matrices, and sets, respectively.\nWe denote by $[X]$ the set $\\{1,2,\\ldots,X\\}$, by $\\left[\\bm{x}\\right]_{i}$ element $i$ of vector $\\bm{x}$, by $\\left[\\bm{X}\\right]_{i,j}$ the element $ij$ of matrix $\\bm{X}$, and by $|\\mathcal{X}|$ the cardinality of set $\\mathcal{X}$. We define $\\mathds{1}\\{\\bm{x}\\}$ as the element-wise indicator function returning $\\bm{y}$, where $[\\bm{y}]_i$ takes 1 when condition $[\\bm{x}]_i$ holds, and 0 otherwise. 
The curled inequality ($\\succeq$ or $\\succ$) represents element-wise inequality. We use $\\bm{1}$ and $\\bm{0}$ to denote all-one and all-zero vectors, respectively.\n\\section{Background on Distributed Learning}\\label{sec:Background}\nConsider the problem of minimizing a sum of smooth functions $\\{f_{i}: \\mathbb{R}^d \\mapsto \\mathbb{R}\\}_{i \\in [N]}$, with corresponding gradients $\\{\\nabla f_{i}: \\mathbb{R}^d \\mapsto \\mathbb{R}^d\\}_{i \\in [N]}$:\n\\begin{equation}\\label{eq: MainOptimProblem}\n\\mbox {\\boldmath $w$}^\\star {\\coloneqq} \\min_{\\mbox {\\boldmath $w$}\\in\\mathbb{R}^d} f(\\mbox {\\boldmath $w$}) {=} \\min_{\\mbox {\\boldmath $w$}\\in\\mathbb{R}^d} \\frac{1}{N}\\sum\\nolimits_{i \\in [N]}f_i(\\mbox {\\boldmath $w$}).\n\\end{equation}\nSuch problems frequently arise in distributed learning where $N$ is the number of distributed devices, $f$ could express the global loss function, and each $f_i$ could represent a local loss function. In practice, to parallel the computations or to preserve the privacy of local datasets, we use distributed algorithms to solve~\\eqref{eq: MainOptimProblem}~\\cite{Bottou2018SIAM}. That is, at iteration $k$, a subset of the workers compute and upload their local gradients $\\{\\nabla f_i(\\mbox {\\boldmath $w$}_k)\\}_i$ to a central node, which updates the model and broadcasts the updated model parameters $\\mbox {\\boldmath $w$}_{k+1}$ back to the workers.\\footnote{We have a similar set of trade-offs and solutions for non-smooth functions, where we cannot define gradients. The major difference compared to this paper is that instead of updating based on gradients, we may need to update based on its generalizations, like subgradients~\\cite{scaman2018optimal}.} \\gls{fl} is another popular method in which the workers will run one or several local training passes before uploading their local models. The central node will then take a global average over them. The communication overhead is almost the same as uploading gradients \\cite{li2019federated}. However, most of these \\gls{ul} messages (gradients or local models) may be redundant, carrying almost no additional information since they can be retrieved from their past communicated messages as well as messages of other devices \\cite{ghadikolaei2021lena}. Forcing some of them to remain silent would i) reduce \\gls{ul} interference to other users, ii) increase throughput, and iii) improve latency. \n\n\nIn conventional synchronous distributed training methods, the central node waits until it receives the local updates from all participating devices, leading to a considerable inactive time at the central node as well as faster devices waiting for stragglers. To tackle the straggling problem, in $n$-sync approaches, the central node only waits for a subset of participating devices, say $n$ out of all $N$ devices, and updates the global model using their messages at every iteration \\cite{duttaKsync}. Nevertheless, vanilla $n$-sync-based methods add extra load on the underlying communication system, as they will ask all the devices to upload their data, and the central node starts its update with the first $n$ received data. Reference \\cite{ji2020dynamic} proposed an algorithm to adjust $n$ at every iteration. References~\\cite{fl,ghadikolaei2021lena,chen2018lag} proposed various approaches to eliminate some unnecessary uploads. 
However, none of those works study or optimize the interplay between distributed learning and other parallel communication services.\n\n\n\\section{System Model}\\label{sec:systemModel}\n\\subsection{Network Model}\\label{sec:networkModel}\nWe consider an industrial automation scenario, where a set of $\\mathcal{G}{\\coloneqq}[G]$ \\glspl{gnb}, each consisting of $1$ cell, serve a set of $\\mathcal{U}{\\coloneqq}[U]$ industrial devices in the factory hall execute different functions that enable automated production. The communication system should timely and reliably deliver i) monitoring data to \\glspl{gnb} and ii) computed or emergency control commands to the actuators.\n\nFor simplicity, we assume that the \\gls{ai} devices are distinct from the industrial devices, and there exist a set of $\\mathcal{N} {\\coloneqq} [N]$ \\gls{ai} devices. Moreover, we assume that the \\gls{ai} central node needs to receive the relevant local information from $n$ out of these $N$ \\gls{ai} devices to update its global model at each iteration. To tackle the straggler effect, the \\gls{ai} central node might request an update from $\\mathcal{N}_{m,k} \\subseteq \\mathcal{N}$ at iteration $k$, where $|\\mathcal{N}_{m,k}| {=} m_k({\\geq} n)$ of the devices to participate in the training. Hence, at $k$th iteration, the central node might request $m_k{-}n$ extra backup devices to mitigate the straggler problem in a synchronous distributed learning scenario.\n\nTo manage the coexistence of two services, \nwhere the priority of services are inherently different, 5G and beyond 5G envision two approaches. The first approach, employed in this paper, is to use the existing standardized protocols in 5G-NR for \\gls{qos} handling. In this case, each connected device is assigned with one or several \\gls{qos} flows and data radio bearers, where the former is set in the core network, depending on the service \\gls{qos} requirements. For example, in our scenario, the traffic from/to \\gls{urllc} devices is set to have high priority \\gls{qos} flow to ensure low latency, whilst the traffic from/to \\gls{ai} devices is set to have low priority \\gls{qos} flow. Each (or several) of these \\gls{qos} flows are then mapped to a data radio bearer in the \\gls{ran}. In \\gls{gnb} and devices, there is an associated \\gls{rlc} buffer to each data radio bearer, and in our case, with strict priority scheduling \\cite{dahlman5GNr}. The second approach is to have separate slices for \\gls{urllc} and distributed \\gls{ai}, resulting in full resource separation (e.g., in terms of bandwidth).\n\n\n\\subsection{Distributed Learning Process}\\label{sec:distAlg}\nWe consider a network of $N$ \\gls{ai} devices that cooperatively solve a distributed learning problem. 
Assuming that $\\mathcal{N}_{n,k} \\subseteq \\mathcal{N}_{m,k}$ is the subset of size $n$ whose updates the central node receives first at iteration $k$, then iteration $k$ of an abstract distributed algorithm reads:\n\\vspace{-2mm}\n\\begin{subequations}\\label{eq:dAI}\n\\begin{align}\n &\\mbox {\\boldmath $w$}_{k+1}{=}A{\\left(\\mbox {\\boldmath $c$}_{i,k}, \\mbox {\\boldmath $w$}_k\\right)},\\quad \\mbox{for} \\quad \\forall i \\in \\mathcal{N}_{n,k} \\label{eq:globalUp}\\\\\n & \\mbox {\\boldmath $c$}_{i,k}{=}C_i{\\left(\\mbox {\\boldmath $w$}_{k}\\right)},\\quad \\mbox{for} \\quad \\forall i \\in \\mathcal{N}_{m,k} \\label{eq:localUp}\n\\end{align}\n\\end{subequations}\nwhere function $A$ represents an algorithm update of the decision variable $\\mbox {\\boldmath $w$}_k$, function $C_i$ picks out the relevant information, $\\mbox {\\boldmath $c$}_{i,k}$, that node $i$ uploads to the server to run the algorithm. This general algorithmic framework covers many \\gls{ml} algorithms, including \\gls{fl} and \\gls{dgd}, with or without data compression. For example, when $C_i$ returns a stochastic gradient, say $\\widehat{\\nabla} f_i(\\mbox {\\boldmath $w$}_{k})$, and $A=\\mbox {\\boldmath $w$}_k - \\eta \\sum_i \\widehat{\\nabla} f_i(\\mbox {\\boldmath $w$}_{k})/n$ for some positive step size $\\eta$, we recover $n$-sync and synchronous \\gls{dgd} for $n({<} N)$ and $n({=} N)$ \\cite{duttaKsync}, respectively. When $C_i$ returns an updated local model parameters of \\gls{ai} device $i$ and $A$ takes an averaging step over a subset of $n ({\\leq} N)$ \\gls{ai} devices, we recover FL ($n$-sync or synchronous). Without loss of generality, and for the sake of simplicity, we assume that the gradients' noise are \\gls{iid} \\cite{gradientNoise}.\n\n\n\\subsection{Channel Model}\nTo model the channel, we consider a \\gls{mimo} system in which we leverage the time varying 3D spatial channel model from \\gls{3gpp} in \\cite{3GPP38901}. In this model, channels are characterized via clustering the multipath components, arriving at antenna arrays, in delay and double-directional angle (i.e., the zenith and azimuth of the \\glspl{aoa} at the receiver and \\glspl{aod} at the transmitter). For simplicity, let us assume that $N_\\mathrm{g}$ and $N_\\mathrm{d}$ are respectively the number of antenna elements of \\gls{gnb} and devices.\nWe denoted by $\\bm{H}_{x,y}(\\tau;t) \\in \\mathds{C}^{N_\\mathrm{d}\\times N_\\mathrm{g}}$ the baseband channel response at time $t$ to an input impulse at time $t-\\tau$, between $x$th device and $y$th \\gls{gnb} in \\gls{dl}.\nThen, an entry of $\\bm{H}_{x,y}(\\tau;t)$ for $p$th receiving antenna element and $q$th transmitting antenna element can be computed as\n\\begin{multline}\\label{eq:channel}\n\\left[\\bm{H}_{x,y}{\\left(\\tau;t\\right)}\\right]_{p,q} {\\coloneqq} \\sum_{l=1}^{N_\\mathrm{c}} \\sqrt{\\beta_{l}^{x,y}} \\sum_{s=1}^{N_\\mathrm{s}} {\\left(\\bm{g}_{p}^{x,y}{\\left(t,l,s\\right)}\\right)}^\\intercal \\bm{F}_{\\mathrm{xp}}^{x,y}{\\left(t,l,s\\right)}\\\\ \\bm{g}_{q}^{x,y}{\\left(t,l,s\\right)}e^{j\\Upsilon_{p,q}^{x,y}{\\left(t,l,s\\right)}}\\delta{\\left(\\tau-\\tau_{\\mathrm{p},l,s}\\right)},\n\\end{multline}\nwhere $N_{\\mathrm{c}}$ and $N_{\\mathrm{s}}$ are respectively the number of clusters and rays, and $\\beta_{l}^{x,y}$ is a function of path loss, shadowing and $l$th cluster normalized power. 
Besides, ${\\bm{g}_{p}^{x,y}\\!{\\left(\\cdot\\right)}}$ is the field patterns of $p$th receiving element that $s$th ray of $l$th cluster has in the direction defined by arriving zenith and azimuth angles, $\\bm{F}_{\\mathrm{xp}}^{x,y}\\!{\\left(\\cdot\\right)}$ is $2{\\times}2$ matrix modeling the cross polarization power ratio for $s$th ray of $l$th cluster, $\\bm{g}_{q}^{x,y}\\!{\\left(\\cdot\\right)}$ is the field patterns of $q$th transmitting element that $s$th ray of $l$th cluster has in the direction defined by departing zenith and azimuth angles, $\\Upsilon_{p,q}^{x,y}{\\left(\\cdot\\right)}$ is a function of location vector of $p$th receiving and $q$th transmitting element as well as the Doppler frequency, and finally, $\\tau_{\\mathrm{p},l,s}$ is the propagation delay of $s$th ray in $l$th cluster. For \\gls{ul}, $\\bm{H}_{x,y}{\\left(\\tau;t\\right)}$ can be derived by swapping $p$ and $q$ in \\eqref{eq:channel}.\n\nNote that although we leverage the \\gls{3gpp} statistical spatial channel model~\\cite{3GPP38901}, our problem formulation (Section~\\ref{sec:problemFor}) and solution approach (Section~\\ref{sec:solution}) are general and not limited to this channel model. In the next section, we use these models to formulate our performance metrics.\n\\section{Performance Metrics and Problem Formulation}\\label{sec:PM}\n\\subsection{\\gls{urllc} Metric: Communication Service Availability}\\label{sec:urllcKpi}\nThe players in operational, information and communication technologies are entering new territory in which 5G is utilized to connect industries. The main challenge in such a merger is ensuring that the operational requirements are fulfilled during a 5G system’s operating phase \\cite{5gAcia}.\nOne well-accepted metric in the operational technology domain is availability. Hence, \\gls{3gpp}, as the primary standardization consortium for mobile telecommunications, has attempted to specify the requirements for communication service availability from the application layer perspective in \\cite{3GPP22104,3GPP22261}.\nThe main difference between the end-to-end communication service performance and the observed performance on the network layer is driven by a system parameter called \\textit{survival time}, $T_{\\mathrm{sv}}$. Survival time is the duration of time for which the application layer can tolerate failures in the communication system without any performance degradation in availability \\cite{ganjPimrcTranslation}.\nWe denote the network layer state by a Bernoulli random variable $X_i^{\\Gamma}{\\left(t\\right)}$, where $\\Gamma{\\in}$\\{\\gls{ul}, \\gls{dl}\\}, and $X_i^{\\Gamma}{\\left(t\\right)}$ for the $i$th \\gls{urllc} device is zero if the last packet is not received at the communication interface within a specified delay bound, because either it could not be decoded at the lower layers or faced excessive retransmission, and/or queuing delays.\nConsequently, assuming that application recovery time is negligible, we define the per-device application layer state variable, $Y_i^{\\Gamma}{\\left(t\\right)}$ as \n\\begin{equation}\\label{eq:appState}\nY_i^{\\Gamma}{\\left(t\\right)}{\\coloneqq}\n \\begin{cases}\n 0, & \\mathrm{if} \\int_{t-T_{\\mathrm{sv}}}^t X_i^{\\Gamma}\\!{\\left(\\tau\\right)}d\\tau = 0,\\\\\n {1,} & \\mathrm{otherwise}. 
\\\\\n \\end{cases}\n\\end{equation}\nTherefore, we can define the long-term communication service availability for the $i$th \\gls{urllc} device in $\\Gamma$ direction as \\cite{ganjGCOrch}\n\\begin{equation}\\label{eq:availInf}\n \\alpha_i^{\\Gamma}{\\coloneqq} \\lim_{t\\to \\infty}\\Pr\\left\\{Y_i^{\\Gamma}\\!{\\left(t\\right)} {=} 1\\right\\} {=} \\lim_{T\\to\\infty}\\frac{1}{T}\\int_{0}^T Y_i^{\\Gamma}\\!{\\left(t\\right)} dt.\n\\end{equation}\nThe availability in $\\Gamma$ direction can be estimated over a short time period using\n\\begin{equation}\\label{eq:availEst}\n \\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_k\\right)} {\\coloneqq} \\frac{1}{\\Delta t_k}\\int_{t_k}^{t_k+\\Delta t} Y_i^{\\Gamma}\\!{\\left(t\\right)} dt.\n\\end{equation}\nIn \\gls{urllc}, the requirement is often defined in the form of \\cite{popovskiUrllc}\n\\begin{equation}\\label{eq:availReq}\n \\Pr\\left\\{\\alpha_i^{\\Gamma} \\leq \\alpha_i^{\\mathrm{req}}\\right\\}\\leq\\gamma, \\enspace\\forall i\\in \\mathcal{U},\\\n\\end{equation}\nwhere $\\alpha_i^{\\mathrm{req}}$ is the communication service availability requirement of the use case that \\gls{urllc} device $i$ belongs to, and $\\gamma$ is the sensitivity of this use case to $\\alpha_i^{\\mathrm{req}}$. We follow \\cite{3GPP22104} in assuming that the requirement for \\gls{ul} and \\gls{dl} availability is the same for a given use case.\n\n\n\\subsection{Distributed AI Metrics: Training Delay and Accuracy}\\label{sec:dAIKpi}\nThe performance of the distributed \\gls{ai} can be characterized by two factors: training delay (or convergence time) and training accuracy. \n\nThe convergence time of distributed \\gls{ai} algorithms is bounded by the communication and processing latency \\cite{saadConvergOpt}. Let us denote the \\gls{ai} device selection at iteration $k$ by an indicator vector of $\\mathcal{N}_{m,k}$ as $\\bm{\\pi}_k^{\\mathrm{u}}{=}\\left[\\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_1, \\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_2, \\ldots, \\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_N\\right]$, where $\\left[\\bm{\\pi}_{k}^{\\mathrm{u}}\\right]_i {\\in} \\{0,1\\}, \\forall i{\\in} \\mathcal{N}$. Assuming that the central node requests a subset $\\mathcal{N}_{m,k}$ (i.e., $\\bm{1}^T \\bm{\\pi}_k^{\\mathrm{u}} {=}m_k$) to participate in the training while it waits for $n(\\leq m_k, \\forall k\\in [K])$ local gradients/models, then the \\gls{ai} training delay in the central node for the $k$th iteration, $d_k^{\\mathrm{AI}}$, can be derived as\n\\begin{equation}\\label{eq:aiTrainingDelay}\nd_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)} {\\coloneqq} \\min\\!{\\left\\{\\!\\min_{\\substack{\\mathcal{N}_{n,k}\\subseteq\\mathcal{N}_{m,k},\\\\|\\mathcal{N}_{n,k}|{=}n}}{\\!\\!\\left(\\max_{i\\in\\mathcal{N}_{n,k}}{\\!\\!\\left(d_{i,k}^{\\mathrm{D}}{+}d_{i,k}^{\\mathrm{pr}}{+}d_{i,k}^{\\mathrm{U}}\\right)}\\right)}{+}d_k^{\\mathrm{pr}}, T^{\\max}\\!\\right\\}},\n\\end{equation}\nwhere $\\bm{\\Pi}_k^{\\mathrm{u}}{\\coloneq} \\left[\\bm{\\pi}_1^{\\mathrm{u}}, \\ldots, \\bm{\\pi}_k^{\\mathrm{u}}\\right]$ is the device selection matrix, $d_{i,k}^{\\mathrm{D}}$, $d_{i,k}^{\\mathrm{pr}}$, and $d_{i,k}^{\\mathrm{U}}$, are the latency of \\gls{dl} transmission of the global model, local training (represented in \\eqref{eq:localUp}), and \\gls{ul} transmission of local gradients/models for $k$th iteration of the $i$th device, respectively. 
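Since the best subset of size $n$ in \\eqref{eq:aiTrainingDelay} always consists of the $n$ fastest devices, the inner min-max reduces to the $n$th smallest per-device total delay (\\gls{dl}, local training, and \\gls{ul}). A minimal sketch of this evaluation is given below; it is illustrative only, and the per-device delay lists are assumed to be reported by the simulator.\n\\begin{verbatim}\ndef ai_training_delay(d_dl, d_loc, d_ul, d_central, n, t_max):\n    # d_dl, d_loc, d_ul: per-device DL, local-training and UL delays of\n    # the m_k selected devices (equal-length lists, in seconds).\n    totals = sorted(dl + pr + ul for dl, pr, ul in zip(d_dl, d_loc, d_ul))\n    # The n-th smallest total equals the min over size-n subsets of the\n    # subset-wise maximum; add the central update time and cap at T_max.\n    return min(totals[n - 1] + d_central, t_max)\n\\end{verbatim}\n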
It is worth noting that $d_{i,k}^{\\mathrm{D}}$ and $d_{i,k}^{\\mathrm{U}}$ include the transmission processing, payload transmission, occurred retransmissions, and queuing delay (which is determined by the number of devices sharing the same time-frequency resources in the current and previous \\glspl{tti}), and thus, are a function of $\\bm{\\Pi}_k^{\\mathrm{u}}$. Besides, $d_k^{\\mathrm{pr}}$ is the $k$th iteration processing delay required to perform the global model update on the central node, represented in \\eqref{eq:globalUp}. Thus, in \\eqref{eq:aiTrainingDelay}, for each subset of $\\mathcal{N}_{n,k}$ with cardinality of $n$, the maximum aggregated communication and processing delay is calculated among devices. Then, among subsets, $d_k^{\\mathrm{AI}}{(\\cdot)}$ is determined by picking the subset with the lowest delay. However, to avoid an infinite waiting time in the central node, we define $T^{\\mathrm{max}}$ as the maximum permissible delay of every iteration. \\figurename\\,\\ref{fig:dAiWorkflow} demonstrates the training delay in $n$-sync distributed training.\n\\begin{figure}[t]\n\t\\centering\n\t\\includegraphics[width=.99\\columnwidth,keepaspectratio]{./Components/Figs/JSAC/delayFig.pdf}\n\n\t\\caption{The illustration of training delay in distributed AI workflow.}\n\t\\label{fig:dAiWorkflow}\n\t\\vspace{-6mm}\n\\end{figure}\n\nTraining accuracy is another performance metric for a distributed learning task. We can find ``critical points\" as the set of points where the norm of their derivative is 0. The local minima and maxima are a subset of these points. In general, under some regularity assumptions, we can often converge to an approximate critical point (a point wherein the norm of the gradients gets smaller than some positive $\\epsilon$). Note that a critical point may not be an optimal point, e.g., a saddle point. We denote by $K_{\\min}$ the iteration number after which the algorithm converges. As we will show in Section~\\ref{sec:solution}, our design depends on $K_{\\min}$. In the following, we provide a few examples in which we can formulate $K_{\\min}$. 
\nAll examples are formally defined and presented in Appendix~\\ref{app:1}-\\ref{app:3}.\n\n\n\\begin{example}\\label{ex1}\n\\textit{\\gls{dgd}, strongly convex \\cite{Bottou2018SIAM}:} Under smoothness and strong convexity assumptions of the objective functions, as well as a few more technical assumptions \\cite{Bottou2018SIAM}, we can show that the minimum number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy\\footnote{\\label{convexFootnote}In the case of a convex function, a critical point is the global minima.} of the objective functions fulfills the following inequality:\n\\begin{equation}\\label{eq:kConvexSimple}\nK_{\\min} \\geq \\log_{b}{\\left(\\frac{W^{\\mathrm{A}}}{\\epsilon{-}\\frac{z^{\\mathrm{A}}}{n}}\\right)} +1,\n\\end{equation}\nwhere $n$ is the number of participants in the global averaging step, $W^{\\mathrm{A}}$ is a positive constant representing the initial distance to the minimal value of the global loss function, $f{(\\mbox {\\boldmath $w$}^\\star)}$, and $z^{\\mathrm{A}}$ is a positive constant which depends on the learning rate, Lipschitz constant, strong convexity, and the variance of gradient noise when $n{=}1$.\n\\end{example}\n\n\n\\begin{example}\\label{ex2}\n\\textit{\\gls{dgd}, non-convex \\cite{Bottou2018SIAM}:} Under the smoothness assumption of the objective functions, we can show that the minimum number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy to a critical point, fulfills the following inequality:\n\\begin{equation}\\label{eq:kNonConvexSimple}\nK_{\\min}\\geq \\frac{W^\\mathrm{B}}{\\epsilon{-}\\frac{z^\\mathrm{B}}{n}},\n\\end{equation}\nwhere $n$ is the number of participants in the global averaging step, $W^\\mathrm{B}$ is a positive constant which depends on the initial distance to the lower bound of $f$, and $z^\\mathrm{B}$ is a positive constant which is a function of the learning rate, Lipschitz constant, and the variance of gradient noise when $n{=}1$.\n\\end{example}\n\n\\begin{example}\\label{ex3}\n\\textit{\\gls{fl} Algorithm \\cite{flConvergNoniid}:} Consider a \\gls{fl} algorithm with $E$-step local iterations, $N$ devices with \\gls{iid} datasets, global model averaging with $n ({\\leq N})$ randomly selected local models, smooth and strongly convex objective functions. We can show that the minimum required number of iterations, $K_{\\min}$, to ensure $\\epsilon$-accuracy\\footnoteref{convexFootnote} of the objective functions would scale with\n\\begin{equation}\\label{eq:flEpsAccuracy}\nK_{\\min} \\propto \\frac{1}{\\epsilon}\\left[\\left(1+\\frac{1}{n}\\right)EG^2 + \\frac{\\frac{\\sigma^2}{N}+G^2}{E}+G^2\\right],\n\\end{equation}\nwhere $G^2$ and $\\sigma^2$ are the upper bounds on the second moment of the gradient estimates and gradient noises, respectively, in different \\gls{ai} devices.\n\\end{example}\n\nIn \\eqref{eq:kConvexSimple}-\\eqref{eq:flEpsAccuracy}, it is clear that the number of iterations that distributed \\gls{ai} requires to reach $\\epsilon$-accuracy (i.e., $K_{\\min}$) decreases as the required number of devices participating in the global update (i.e., $n$) increases in $n$-sync scheme.\nIn the next section, we show how to use $K_{\\min}$ in our solution approach. \n\\subsection{Problem Formulation} \\label{sec:problemFor}\nHaving defined the system model and the \\glspl{kpi} of interest, the next step is to design a device selection scheme to optimize the distributed \\gls{ai} training process over a wireless network, while still fulfilling the \\gls{urllc} availability requirements. 
Then, from the definition of $d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}$ in \\eqref{eq:aiTrainingDelay}, the availability requirement in \\eqref{eq:availReq}, and assuming the $n$-sync scheme for the distributed \\gls{ai} procedure, the joint optimization problem for distributed \\gls{ai} implementation over a wireless network can be expressed as follows:\n\\begin{subequations}\\label{eq:opt1UserSel}\n\\begin{alignat}{3}\n&\\!\\min_{\\bm{\\Pi}_K^{\\mathrm{u}}} & & \\sum_{k=1}^{K} {d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}}\\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)},\\tag{\\ref{eq:opt1UserSel}}&\\label{eq:UserSelObj}\\\\\n&\\text{s.t.} & & \\Pr\\left\\{\\alpha_{i}^{\\Gamma} \\leq \\alpha_{i}^{\\mathrm{req}}\\right\\}\\leq\\gamma, \\enspace\\forall i \\in \\mathcal{U}, \\forall \\Gamma \\in \\{\\mathrm{UL,DL}\\}, &\\label{eq:UserSelC1}\\\\\n& & & \\bm{1}^\\intercal\\bm{\\pi}_k^\\mathrm{u} \\geq n, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC2}\\\\\n& & & \\bm{\\pi}_k^\\mathrm{u} \\in \\{0,1\\}^{N}, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC3}\\\\\n& & & \\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)} \\in \\{0,1\\}, \\enspace\\forall k \\in [K],&\\label{eq:UserSelC4}\n\\end{alignat}\n\\end{subequations}\nwhere\n$\\Omega{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}$ is a binary variable taking $1$ for the iteration numbers at which the iterative algorithm has not reached $\\epsilon$-accuracy (e.g., $\\forall k{\\in} [K_{\\min}{-}1]$ for Examples \\ref{ex1}-\\ref{ex3}), and $K$ should be selected sufficiently large so that the distributed \\gls{ai} algorithm's $\\epsilon$-accuracy is ensured within $K$ iterations. To fulfill the required number of local updates in the global update of \\eqref{eq:dAI} in the $n$-sync scheme, \\eqref{eq:UserSelC2} forces the central node to select at least $n$ \\gls{ai} devices, while leaving it flexible to select any number of extra devices to tackle the straggler problem (i.e., $m_k = \\bm{1}^\\intercal\\bm{\\pi}_k^\\mathrm{u}$, and $m_k\\in[n, n+1, \\ldots, N], \\forall k\\in[K]$).\nMoreover, \\eqref{eq:UserSelC3} indicates that the device selection policy is a binary vector. Note that \\eqref{eq:UserSelC2}, \\eqref{eq:UserSelC3}, and \\eqref{eq:UserSelC4} must be respected at all decision time epochs. \n\nOn the one hand, the communication service availability of each \\gls{urllc} device is a function of the channel state variable, $X_{i}^{\\Gamma}{(t)}$, and the end-to-end delay of its packets. These two depend on many variables, such as the instantaneous \\gls{sinr}, path gain, code rate, and transmission buffer status. On the other hand, these variables also impact the delay of the selected \\gls{ai} devices, influencing the training delay of each iteration. In addition, since the \\gls{urllc} service has higher priority than the distributed \\gls{ai} service, the amount of \\gls{urllc} traffic being served on the corresponding \\gls{gnb} severely affects the distributed \\gls{ai} training delay. However, joint modeling of these \\gls{urllc} and distributed \\gls{ai} \\glspl{kpi} is highly complex and mandates significant assumptions on the queue models, channel, and traffic.\n\n\\section{Transformation to \\gls{mdp} Problem}\\label{sec:solution}\nThe optimization problem \\eqref{eq:opt1UserSel} is non-convex. 
In addition, characterizing the impact of $\\bm{\\pi}_k^\\mathrm{u}$ on our \\glspl{kpi} necessitates explicit modeling of the channel and queues, which involves approximations that may not be accurate in practice. Therefore, we propose to model the device selection problem \\eqref{eq:opt1UserSel} as a finite-horizon \\gls{mdp}. Consequently, in Section~\\ref{sec:mdp}, we specify the state space, $\\mathcal{S}$, the action space, $\\mathcal{A}$, and the set of all possible rewards, $\\mathcal{R}$, through dynamic interactions between the central node and the \\gls{ran} environment. Nevertheless, it is not possible to derive the transition probability function ($p: \\mathcal{S}\\times\\mathcal{R}\\times\\mathcal{S}\\times\\mathcal{A} \\mapsto [0,1]$) in our complex and dynamic environment. Hence, in Section~\\ref{sec:sac}, we solve our device selection problem with a model-free \\gls{drl} algorithm, namely \\gls{sac}, to address the finite-horizon, fully observable, discounted \\gls{mdp} problem.\n\nOn the path to transforming optimization problem \\eqref{eq:opt1UserSel} into a \\gls{mdp} problem, we note that our two services operate on different time scales. In other words, \\gls{urllc} performance should be measured in actual time, whilst each time step of the control loop lasts as long as one iteration of the distributed AI algorithm (i.e., $d_k^{\\mathrm{AI}}{\\left(\\cdot\\right)}$ for the $k$th iteration). Hence, the control loop is not periodic in actual time, and is triggered by the central node. Accordingly, we use $\\Delta t_k$ (i.e., $t_{k+1}{-}t_{k}$) wherever necessary to emphasize the time instants at which iteration $k$ begins and ends.\n\n\\subsection{\\gls{mdp}}\\label{sec:mdp}\nThe essential elements of the \\gls{mdp} are determined as follows.\n\\subsubsection{State Space, $\\mathcal{S}$} \\label{sec:state}\nThe state space characterizes the environment. We categorize the environment's state at iteration $k$ (i.e., $s_k\\in\\mathcal{S}$) into three classes: i) the observations from each \\gls{urllc} device, ii) the observations from each \\gls{ai} device, and iii) the observations from each \\gls{gnb}. In the following, we describe these three classes.\n\n\\noindent\\textbf{\\gls{urllc} \\gls{qos} variables:}\nThe communication service availability of each \\gls{urllc} device, as the main \\gls{urllc} \\gls{kpi}, is a function of the \\gls{per}, mean downtime, and survival time \\cite{ganjPimrcTranslation,ganjGCOrch}. Except for the survival time, which is static and use-case specific, the state should include both the (\\gls{ul}/\\gls{dl}) \\gls{per} and the (\\gls{ul}/\\gls{dl}) mean downtime, estimated via empirical measurements within $\\Delta t_{k-1}$. In addition to these measures that explicitly affect the communication service availability, the cell association, (\\gls{ul}/\\gls{dl}) buffer size (at $t_k$), (\\gls{ul}/\\gls{dl}) \\gls{sinr}, and (\\gls{ul}/\\gls{dl}) delay are other metrics that implicitly impact the availability. However, the \\gls{sinr} and delay statistics might vary significantly during the, possibly long, interval $\\Delta t_{k-1}$. Hence, we represent their distributions using specific statistics of these measures, i.e., the $1$st percentile, $5$th percentile, and median of the \\gls{sinr} distribution, and the $95$th percentile, $99$th percentile, and median of the delay distribution. In fact, utilizing such percentiles is well motivated by the extreme availability requirements of \\gls{urllc}, whose performance, under proper system design, is affected by the tail of the delay and \\gls{sinr} distributions \\cite{URLLCTRS}. 
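As an illustration of how these \\gls{urllc} observations can be assembled into state features, the following minimal sketch (with purely illustrative function and variable names) computes the tail-oriented percentiles from the \\gls{ul} or \\gls{dl} measurement buffers collected within $\\Delta t_{k-1}$; it is a simplified sketch rather than our exact implementation.\n\\begin{verbatim}\nimport numpy as np\n\ndef urllc_state_features(sinr_samples, delay_samples,\n                         per, mean_downtime,\n                         buffer_size, cell_id):\n    # Tail-oriented summaries of the per-device SINR and\n    # delay samples measured within the previous iteration.\n    sinr_stats = np.percentile(sinr_samples, [1, 5, 50])\n    delay_stats = np.percentile(delay_samples, [50, 95, 99])\n    explicit = [per, mean_downtime, buffer_size, cell_id]\n    return np.concatenate([explicit, sinr_stats, delay_stats])\n\\end{verbatim}\n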
\n \n\\noindent\\textbf{Distributed \\gls{ai} delay variables:}\nThe training delay of each iteration, $d_k^{\\mathrm{AI}}{(\\bm{\\Pi}_k^{\\mathrm{u}})}$, is a function of the \\gls{ai} device selection; thus, we include a binary variable in the state indicating if this device has been selected in the last iteration. As indicated by \\eqref{eq:aiTrainingDelay}, the delay of the \\gls{dl} transmission of the global model, $d_{i,k}^{\\mathrm{D}}$, and the delay of the \\gls{ul} transmission of local gradients/models, $d_{i,k}^{\\mathrm{U}}$, directly impacts the training delay, and thus, should be included in the state. Besides, (\\gls{ul}/\\gls{dl}) buffer size (at $t_k$), and (\\gls{ul}/\\gls{dl}) \\gls{sinr} of the underlying transmissions has an implicit effect on delay, and therefore, we include them in the state. Focusing on the overall statistics (unlike \\gls{urllc} service), we represent the \\gls{sinr} distribution of the underlying transmissions for each \\gls{ai} device with its $5$th percentile, median and $95$th percentile.\nNote that no empirical measurement exists for $d_{i,k}^{\\mathrm{D}}$, $d_{i,k}^{\\mathrm{U}}$, and \\gls{sinr} of \\gls{ai} devices whose central node does not receive their local models in the $k$th iteration (i.e., $i\\notin \\mathcal{N}_{n,k}$). Moreover, if an \\gls{ai} device was selected at $t_{k-1}$, but its local update is not part of the first $n$ received local updates, its buffer size at $t_k$ would not be empty.\n\n\\noindent\\textbf{\\gls{gnb} level observations:}\nOn the \\gls{gnb} level, the number of resource blocks each service has consumed (in both \\gls{ul} and \\gls{dl} directions) significantly impacts both training delay and availability. Therefore, we propose to include the mean number of allocated resource blocks (per slot), within $\\Delta t_{k-1}$, for each service in the state.\n\n\\subsubsection{Action Space, $\\mathcal{A}$} \\label{sec:actionMDP}\nThe action space, $\\mathcal{A}$, is the set of all possible decision variables by which the \\gls{drl} agent interacts with the environment. Considering \\eqref{eq:opt1UserSel}, our action vector at the $k$th iteration should be the device selection vector $\\bm{\\pi}_k^{\\mathrm{u}}$. However, to mask out selections that do not follow condition \\eqref{eq:UserSelC2}, we define $\\bm{a}_k$ as a continuous vector where each element represents the action for an \\gls{ai} device (i.e., $\\bm{a}_k\\in \\left[-1,1\\right]^N$). Then, the mapping from $\\bm{a}_k$ to $\\bm{\\pi}_k^{\\mathrm{u}}$ is determined by\n\n\\begin{equation} \\label{eq:actionToselection}\n \\bm{\\pi}_k^{\\mathrm{u}}\\coloneq\\begin{cases}\n \\mathds{1}\\{\\bm{a}_k\\succeq \\bm{0}\\}, & \\text{if $\\bm{1}^\\intercal\\mathds{1}\\{\\bm{a}_k\\succeq \\bm{0}\\}\\geq n$},\\\\\n \\mathds{1}\\{\\bm{a}_k\\succeq a_{k}^{(n)}\\bm{1}\\}, & \\text{otherwise},\n \\end{cases}\n\\end{equation}\nwhere $a_{k}^{(n)}$ is the $n$th largest element in vector $\\bm{a}_k$.\n\n\\subsubsection{The Reward Function, $r$} \\label{sec:reward}\nIn general, the \\gls{drl} agent follows an explicit goal, i.e., to maximize its cumulative discounted rewards. In other words, the reward function, $r_{k+1}$, is the payoff for taking action $\\bm{a}_k$ at state $\\bm{s}_k$. \nAs \\eqref{eq:availInf} indicates, the communication service availability of \\gls{urllc} devices is measured in infinite time while the temporal granularity of the \\gls{drl} is determined by the amount of time each distributed \\gls{ai} training round takes, $\\Delta t_k$. 
Therefore, we suggest using the availability estimator in \\eqref{eq:availEst} as part of the reward function. \nAlthough estimating such a long-term measure over a short period may be imprecise, it does reflect the consequence of the device selection policy as the application layer observes it in the near future. Given the optimization objective \\eqref{eq:opt1UserSel} and condition \\eqref{eq:UserSelC1}, we define the reward for iteration $k+1$, $r_{k+1}$, as\n\\begin{multline}\n r_{k+1} \\coloneqq \\upsilon\\exp{\\left(\\zeta\\min\\left\\{\\min_{\\substack{i\\in\\mathcal{U},\\\\\\Gamma \\!{\\in}\\! \\{\\mathrm{UL},\\mathrm{DL}\\}}}\\!\\!\\!\\!\\!{\\left(\\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_{k}\\right)}-\\alpha_i^\\mathrm{req}\\right)},0\\right\\}\\right)} \\\\ \n + \\left(1-\\upsilon\\right) \\frac{T^{\\mathrm{max}} - d_k^{\\mathrm{AI}}{\\left(\\bm{\\Pi}_k^{\\mathrm{u}}\\right)}}{T^{\\mathrm{max}}},\\label{eq:reward}\n\\end{multline}\nwhere $\\upsilon {(\\in [0,1])}$ is the weight characterizing the relative importance of the \\gls{urllc} reward and the distributed \\gls{ai} reward. In the \\gls{urllc} reward, $\\left(\\hat{\\alpha}_i^{\\Gamma}{\\left(\\Delta t_k\\right)}-\\alpha_i^\\mathrm{req}\\right)$ is negative for those \\gls{urllc} devices that did not meet their corresponding availability requirement within $\\Delta t_k$, regardless of the transmission direction, $\\Gamma$. Hence, our reward function design drives the \\gls{drl} agent to maximize the availability of the worst \\gls{urllc} device among those that do not meet their availability requirements. In addition, $\\zeta$ is a design parameter which is a function of the sensitivity ($\\gamma$) and the precision that the maximum availability requirement needs (i.e., $\\zeta {\\propto} \\max_i\\!{\\left(\\alpha_i^{\\mathrm{req}}\\right)}{/}\\gamma$).\nNevertheless, the device selection policy receives the full reward on the \\gls{urllc} part (i.e., $\\upsilon$) when all of the devices fulfill their availability requirements. For the distributed \\gls{ai} reward, the shorter $d_k^{\\mathrm{AI}}\\left(\\cdot\\right)$ is, the larger the reward of $\\bm{\\pi}_k^{\\mathrm{u}}$ becomes. Moreover, to minimize the tail of the per-device availability distribution and the average training delay, the \\gls{urllc} reward decreases exponentially while the reduction in the distributed \\gls{ai} reward is linear.\n\n\\subsection{Solution With Soft Actor-Critic Based Algorithm}\\label{sec:sac}\nIn this paper, we employ \\gls{sac} to solve the device selection \\gls{mdp} problem in the coexistence scenario formulated above. The following characteristics of \\gls{sac} benefit our scenario~\\cite{sac}:\ni) \\gls{sac} is an off-policy model-free \\gls{drl} algorithm in which explorations seek to find an optimal policy maximizing not only the expected accumulated discounted reward but also the expected entropy at each visited state, ii) \\gls{sac} has an actor-critic architecture where the critic \\gls{dnn} estimates state-action pair values, while the actor \\gls{dnn} computes the policy, and iii) \\gls{sac} alleviates the high sample complexity and reduces the sensitivity of \\gls{drl} to hyperparameters. In the rest of this section, we describe the \\gls{sac} algorithm of \\cite{sac} as applied to our device selection problem, following our actual implementation.\n\nThe main objective in \\gls{sac} is to find an optimal stochastic policy that maximizes the discounted sum of reward and entropy. 
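Before detailing the algorithm, the following minimal sketch (with illustrative names only, not our exact implementation) shows how an agent action is mapped to a device selection via \\eqref{eq:actionToselection} and how the resulting reward in \\eqref{eq:reward} can be evaluated, assuming the availability estimates $\\hat{\\alpha}_i^{\\Gamma}$ are provided by the measurement pipeline.\n\\begin{verbatim}\nimport numpy as np\n\ndef action_to_selection(a, n):\n    # Eq. (actionToselection): keep all non-negative entries,\n    # or fall back to thresholding at the n-th largest entry.\n    pi = (a >= 0).astype(int)\n    if pi.sum() < n:\n        kth = np.sort(a)[-n]   # n-th largest element of a\n        pi = (a >= kth).astype(int)\n    return pi\n\ndef reward(avail_est, avail_req, d_ai, t_max,\n           upsilon=0.5, zeta=100.0):\n    # Eq. (reward): exponential URLLC term plus\n    # linear training-delay term.\n    worst_gap = min(np.min(avail_est - avail_req), 0.0)\n    urllc_term = upsilon * np.exp(zeta * worst_gap)\n    ai_term = (1.0 - upsilon) * (t_max - d_ai) / t_max\n    return urllc_term + ai_term\n\\end{verbatim}\n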
For our device selection problem, the action space $\\mathcal{A}$ has $N$ dimensions (refer to Section~\\ref{sec:actionMDP}). Let us denote the action space of the $i$th dimension, which corresponds to the $i$th \\gls{ai} device, by $\\mathcal{A}_i$. Hence, the optimal stochastic policy $\\bm{\\pi}^\\star{(\\cdot|\\bm{s})}$, $\\forall \\bm{s}\\in\\mathcal{S}$, maps the state $\\bm{s}$ to a vector of probability distributions, each over one dimension of the action space (i.e., over $\\mathcal{A}_i$). Thus, the objective of \\gls{sac} is\n\\begin{align}\\label{eq:sacEntropy}\n\\bm{\\pi}^\\star{\\left(\\cdot|\\cdot\\right)} \\coloneq \\argmax_{\\bm{\\pi}\\left(\\cdot|\\cdot\\right)} \\mathbb{E}{\\left[\\sum_{k=1}^{K} \\lambda^{k-1} {\\left[r_{k+1}+\\psi \\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_k\\right)\\right)}\\right]}\\right]}, \n\\end{align}\nwhere $K$, $\\lambda {(\\in[0,1])}$ and $\\psi{(>0)}$ are the episode length, discount factor, and the temperature parameter specifying the relative importance between the reward and entropy terms, respectively. Furthermore, $\\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_k\\right)\\right)} \\coloneqq \\mathbb{E}{\\left[-\\ln\\!{\\left(\\bm{\\pi}{(\\bm{a}|\\bm{s}_k)}\\right)}\\right]}$ is the entropy of policy $\\bm{\\pi}$ at state $\\bm{s}_k$. Introducing entropy in \\eqref{eq:sacEntropy} guides the policy to explore more broadly while avoiding blatantly unfavorable trajectories.\n\nWe employ prioritized experience replay, originally developed for \\gls{dqn} \\cite{prioReplay}. In this approach, transitions with larger temporal-difference errors are replayed more frequently, which introduces a bias toward those actions. To address this issue, we remove this prioritization during training.\nMoreover, the proposed \\gls{sac}-based algorithm framework makes use of target networks and clipped double Q-learning, both of which were established for \\gls{td3}. According to \\cite{td3}, such additions can mitigate overestimation in value approximation while assuring convergence to a suboptimal policy.\n\nThe \\gls{sac} algorithm (and actor-critic methods in general) uses policy iteration, in which the algorithm alternates between policy evaluation (to compute the state-action value function $Q_{\\bm{\\pi}}{(\\bm{s},\\bm{a})}$) and policy improvement (to compute $\\bm{\\pi}$) in the direction of maximizing the sum of discounted returns (i.e., the sum of the reward and a portion of the entropy here). \nIn the policy evaluation step, using the soft Bellman backup equations \\cite{sac}, the soft state-action value function can be computed iteratively as follows:\n\\begin{align}\nQ_{\\bm{\\pi}}{(\\bm{s}_{k},\\bm{a}_{k})} = r_{k+1} {+} \\lambda\\mathbb{E}{\\left[Q{\\left(\\bm{s}_{k+1},\\bm{a}_{k+1}\\right)}{+}\\psi \\mathbb{H}{\\left(\\bm{\\pi}\\left(\\cdot|\\bm{s}_{k+1}\\right)\\right)}\\right]}. \\label{eq:sacQ}\n\\end{align}\n\nIn reinforcement learning problems with large state and action spaces, $Q_{\\bm{\\pi}}$ and $\\bm{\\pi}$ are approximated in each iteration using \\glspl{dnn} (via critics and actors, respectively). As mentioned before, we leverage target networks and clipped double Q-learning. Hence, in our architecture, there are $4$ \\glspl{dnn} for the first critic, its target critic, the second critic, and its target, whose weights are denoted by $\\bm{\\varphi}_1$, $\\bm{\\tilde{\\varphi}}_1$, $\\bm{\\varphi}_2$, and $\\bm{\\tilde{\\varphi}}_2$, respectively. 
In addition, there are $2$ \\glspl{dnn} for the actor and its target network, whose weights are denoted by $\\bm{\\vartheta}$ and $\\bm{\\tilde{\\vartheta}}$, respectively. Thus, $Q_{\\bm{\\pi}}$, in \\eqref{eq:sacQ}, is approximated by $2$ \\glspl{dnn} as $Q_{\\bm{\\varphi}_i}$, $i\\in\\{1,2\\}$.\n\nSince it is preferable to have the offline training option, let us assume the transitions are stored in a replay buffer, $\\mathcal{B}$. Then, regardless of sampling technique (e.g., uniform or prioritized experience replay) and the mini-batch size ($|\\mathcal{B}_{\\mathrm{mb}}|$), we can represent a sampled transition with ${(\\bm{s},\\bm{a},r,\\hat{\\bm{s}}, I)}$, where $I$ is a binary parameter that is $0$ if the distributed \\gls{ai} converges in $\\hat{\\bm{s}}$, and is $1$, otherwise.\nThen, $\\bm{\\varphi}_1$ and $\\bm{\\varphi}_2$ can be trained by minimizing the \\gls{mse} for each sampled transition as\n\\begin{align}\nJ_\\mathrm{Q}(\\bm{\\varphi}_i)\\coloneq\\mathbb{E}{\\left[\\frac{1}{2}\\!\\left(Q_{\\bm{\\varphi}_i}\\!{(\\bm{s},\\bm{a})}{-}\\widetilde{Q}{(\\hat{\\bm{s}},\\tilde{\\bm{a}},r,I)}\\right)^2\\right]}, \\label{eq:lossQJ}\n\\end{align}\nwhere $\\tilde{\\bm{a}}$ is sampled from $\\bm{\\pi}_{\\tilde{\\vartheta}}{(\\cdot|\\bm{s}_{k+1})}$, and \n\\begin{align}\n\\widetilde{Q}{(\\bm{s},\\bm{a},r,I)} \\coloneq r + I \\lambda {\\left(\\min_{i=1,2} Q_{\\bm{\\tilde{\\varphi}}_i}{\\left(\\bm{s},\\bm{a}\\right)}-\\psi \\ln\\!{\\left(\\bm{\\pi}{(\\bm{a}|\\bm{s})}\\right)}\\right)}. \\label{eq:sacTargetQ}\n\\end{align}\nNote that the minimum represents the smallest Q-value between the two state-action value function approximations for clipped double Q-learning \\cite{td3}.\nThen, in order to minimize $J_Q{(\\cdot)}$, $\\bm{\\varphi}_1$ and $\\bm{\\varphi}_2$ are updated in the direction of gradient descent.\nTo ensure that temporal-difference error remains low, we update target critics' weights gradually by $\\bm{\\tilde{\\varphi}}_i {=} \\nu\\bm{\\varphi}_i{+}(1{-}\\nu)\\bm{\\tilde{\\varphi}}_i$ for $i{\\in}\\{1,2\\}$ at each \\gls{drl} iteration.\n\nIn the policy improvement step, the actor \\gls{dnn} can be updated by minimizing the expected Kullback-Leibler divergence between $\\bm{\\pi}_{\\vartheta}$ and the exponential of the soft state-action value function, which can be rewritten as\n\\begin{align}\\label{eq:lossPolicy1}\nJ_\\mathrm{\\pi}{(\\bm{\\vartheta})}\\coloneq\\mathbb{E} \\left[\\psi\\ln\\!{\\left(\\pi_{\\bm{\\vartheta}}{\\left(\\!\\bm{a}|\\bm{s}\\right)}\\right)}-\\min_{i=1,2} Q_{\\bm{\\varphi}_i}\\!{\\left(\\!\\bm{s},\\bm{a}\\right)}\\right].\n\\end{align}\nTo minimize $J_\\mathrm{\\pi}{(\\cdot)}$ based on the latest policy, we employ the re-parameterization technique, from \\cite{sac}, to reformulate the expectation over actions into an expectation over noise, leading to a smaller variance estimator.\nTherefore, we draw samples from a squashed Gaussian policy such that $\\widehat{\\bm{a}}{\\coloneq}\\tanh{\\left(\\mu_{\\mathrm{G},\\bm{\\vartheta}}{(\\bm{s})}+\\sigma_{\\mathrm{G}, \\bm{\\vartheta}}{(\\bm{s})}\\cdot\\bm{\\chi}\\right)}$, where $\\mu_{\\mathrm{G},\\bm{\\vartheta}}{(\\cdot)}$ and $\\sigma_{\\mathrm{G}, \\bm{\\vartheta}}{(\\cdot)}$ are the estimated mean and standard deviation of a Gaussian distribution, respectively, and $\\bm{\\chi}$ follows a multivariate Gaussian distribution, whose mean is a vector of $0$s, and covariance matrix is the identity matrix. 
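As an illustration, the reparameterized draw of $\\widehat{\\bm{a}}$ and the clipped double-Q target in \\eqref{eq:sacTargetQ} can be sketched as follows; the names are illustrative, and the critic and actor outputs are assumed to be available as plain arrays.\n\\begin{verbatim}\nimport numpy as np\n\ndef sample_squashed_action(mu, sigma, rng):\n    # Reparameterized draw: a_hat = tanh(mu + sigma*chi),\n    # with chi ~ N(0, I).\n    chi = rng.standard_normal(mu.shape)\n    return np.tanh(mu + sigma * chi)\n\ndef td_target(r, I, q1_next, q2_next, log_pi_next, lam, psi):\n    # Eq. (sacTargetQ): clipped double-Q soft target.\n    return r + I * lam * (min(q1_next, q2_next)\n                          - psi * log_pi_next)\n\\end{verbatim}\n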
Hence, we can reformulate $J_{\\mathrm{\\pi}}{(\\bm{\\vartheta})}$ by replacing $\\bm{a}$ with $\\widehat{\\bm{a}}$ in \\eqref{eq:lossPolicy1}.\nThe policy parameters, $\\bm{\\vartheta}$, are then updated in the gradient descent direction as in \\cite{sac}. Additionally, we update the target actor weights smoothly by $\\bm{\\tilde{\\vartheta}} = \\nu\\bm{\\vartheta}+(1-\\nu)\\bm{\\tilde{\\vartheta}}$.\n\n\\begin{algorithm}[t]\n\t\\SetKwFor{ForPar}{for}{do in parallel}{end}\n\t\\SetAlgoLined\n\t\\textbf{Input}: Set of \\gls{ai} devices $\\mathcal{N}$, required number of local models in the global update $n$\\;\n\t\\textbf{Output}: Device selection policy as a function of the \\gls{ran} state\\;\n\t\\textbf{Initialize}: $\\bm{\\varphi}_1$, $\\bm{\\varphi}_2$, and $\\bm{\\vartheta}$ and set $\\bm{\\tilde{\\varphi}}_1\\gets\\bm{\\varphi}_1$, $\\bm{\\tilde{\\varphi}}_2\\gets\\bm{\\varphi}_2$, and $\\bm{\\tilde{\\vartheta}}\\gets\\bm{\\vartheta}$, $k\\gets1$\\;\n\t\\Comment*[h]{episode: the number of iterations until distributed \\gls{ai} converges}\\\\\n\t\\ForEach{episode}{\n\t Set the initial device selection, from previous virtual training or at random\\;\n\t \\While{true}{\n \t\tReceive $\\bm{c}_{i,k}$ from $n$ \\gls{ai} devices, or wait until $T^{\\max}$ elapses\\;\n \t\tObserve $\\bm{s}_k$ (measured within $\\Delta t_{k-1}$)\\;\n \t\t\\lIf{$d_k^{\\mathrm{AI}} < T^{\\max}$}{Compute $\\mbox {\\boldmath $w$}_{k+1}$ as in \\eqref{eq:globalUp}}\n \t\t\\lElse{$\\mbox {\\boldmath $w$}_{k+1}\\gets \\mbox {\\boldmath $w$}_{k}$}\n \t\tSample an action, $\\bm{a}_k {\\sim} \\bm{\\pi}_{\\vartheta}{(\\cdot|\\bm{s}_{k})}$\\;\n \t\tTransmit $\\mbox {\\boldmath $w$}_{k+1}$, via \\gls{ran}, to selected \\gls{ai} devices ($\\bm{\\pi}_{k}^{\\mathrm{u}}$, derived from \\eqref{eq:actionToselection})\\;\n \t\tObserve $\\bm{s}_{k+1}$, and calculate $r_{k+1}$ via \\eqref{eq:reward}\\;\n \t\t\\lIf{distributed \\gls{ai} converges}{$I_{k+1}\\gets0$}\\lElse{$I_{k+1}\\gets1$}\n \t\tStore ${\\left(\\bm{s}_k, \\bm{a}_k, r_{k+1}, \\bm{s}_{k+1}, I_{k+1}\\right)}$ in $\\mathcal{B}$\\;\n \t\t\\lIf{$I_{k+1}=0$}{break}\n \t\t$k\\gets k+1$\\;\n\t }\n\t \\Comment*[h]{Training \\glspl{dnn}}\\\\\n\t \\If{$|\\mathcal{B}|{\\geq}|\\mathcal{B}|_{\\min}$ \\textbf{and} $k = l|\\mathcal{B}_\\mathrm{mb}|, \\forall l{\\in}\\{1,2,\\ldots\\}$}{\n Randomly sample a mini-batch $\\mathcal{B}_{\\mathrm{mb}}$ from $\\mathcal{B}$\\;\n \\ForAll{${\\left(\\bm{s}, \\bm{a}, r, \\hat{\\bm{s}}, I\\right)}\\in\\mathcal{B}_{\\mathrm{mb}}$}{\n Derive $\\widetilde{Q}$ using \\eqref{eq:sacTargetQ}, where $\\tilde{\\bm{a}}{\\sim}\\bm{\\pi}_{\\tilde{\\vartheta}}{(\\cdot|\\hat{\\bm{s}})}$\\;\n }\n $\\bm{\\varphi}_i \\gets \\bm{\\varphi}_i {-} \\frac{1}{|\\mathcal{B}_\\mathrm{mb}|}\\sum_{\\forall b\\in\\mathcal{B}_{\\mathrm{mb}}}\\!\\!\\widehat{\\nabla}J_\\mathrm{Q}(\\bm{\\varphi}_i)$, for $i{\\in}\\{1,2\\}$\\;\n $\\bm{\\vartheta}{\\gets}\\bm{\\vartheta} {-} \\frac{1}{|\\mathcal{B}_\\mathrm{mb}|} \\!\\sum_{\\forall b\\in\\mathcal{B}_{\\mathrm{mb}}}\\!\\!\\!\\widehat{\\nabla}\\!J_{\\!\\mathrm{\\pi}}(\\bm{\\vartheta})$, where \\eqref{eq:lossPolicy1} uses $\\widehat{\\bm{a}}$\\;\n $\\bm{\\tilde{\\varphi}}_i \\gets \\nu\\bm{\\varphi}_i{+}(1{-}\\nu)\\bm{\\tilde{\\varphi}}_i$ for $i{\\in}\\{1,2\\}$\\;\n $\\bm{\\tilde{\\vartheta}} \\gets \\nu\\bm{\\vartheta}+(1-\\nu)\\bm{\\tilde{\\vartheta}}$\\;\n }\n\t}\n\\caption{SAC-Based Algorithm for Device Selection in the Coexistence of URLLC and Distributed AI}\n\\label{alg:alg1}\n\\end{algorithm}\n\n\\algorithmcfname\\,\\ref{alg:alg1} summarizes the 
learning procedure of our \\gls{sac}-based device selection solution in the coexistence scenario. Leveraging the off-policy learning capability of such an algorithm, one can train the \\glspl{dnn} via either the virtual network (e.g., digital twin or realistic simulations) or an operating network (e.g., in safe exploration mode). On the former, $\\bm{a}_k$ can be sampled using the behavior policy in each episode, and episodes can run in parallel to speed up the training. Nevertheless, on the operating network, this algorithm can switch to on-policy learning (i.e., $\\bm{a}_k$ is sampled via the most updated policy, and episodes run consecutively). A hybrid strategy in which the \\glspl{dnn} are trained first with a virtual network and then tuned via an operational network could potentially result in a more efficient learning procedure.\n\nIn the following section, we describe our simulator's modeling principles and its configuration.\n\\section{Simulation Methodology and Configuration}\\label{sec:simulation}\nFor simulating the deployment where \\gls{urllc} and distributed \\gls{ai} services coexist, we considered a factory automation scenario, as demonstrated in \\figurename\\,\\ref{fig:simulationSetup}. More explicitly, we designed a 3D model of a small factory of size $40\\times40\\times10$\\,m$^3$ with $4$ \\glspl{gnb} at the height of $8$\\,m, and with an inter-site distance of $20$\\,m.\n\\subsection{Channel Model}\nIn our simulations, we considered the channel model for the \\gls{inf-dh} use case, where the \\gls{gnb} and the devices are placed, respectively, higher and under the average height of the clutters \\cite{3GPP38901}. The clutters in \\gls{inf-dh} use case typically represent small to medium-sized metallic machines and irregularly shaped objects. In the propagation model, the path loss is calculated by tracing the degradation in signal strength over distance under \\gls{los} and \\gls{nlos} circumstances. The path loss under \\gls{los} and \\gls{nlos} assumptions are given by \\cite{3GPP38901}\n\\begin{subequations}\\label{eq:pathLoss}\n\\begin{align}\n &\\hspace{-1.31mm}PL_{\\mathrm{LOS}}{(d_{\\mathrm{3D}})}\\mathrm{[dB]}{=}31.84{+}21.5\\log_{10}\\!{\\left(d_{\\mathrm{3D}}\\right)}{+}19\\log_{10}\\!{\\left(f_{\\mathrm{c}}\\right)}, \\label{eq:PL_LOS}\\\\\n &\\hspace{-1.31mm}PL_{\\mathrm{NLOS}}{(d_{\\mathrm{3D}})} \\mathrm{[dB]} {=} \\max{\\left(PL_{\\mathrm{LOS}}{(d_{\\mathrm{3D}})}, PL_{\\mathrm{DH}}{(d_{\\mathrm{3D}})} \\right)},\\label{eq:PL_NLOS}\n\\end{align}\n\\end{subequations}\nwhere\n\\begin{equation}\\label{eq:PL_InFDH}\nPL_{\\mathrm{DH}}{(d_{\\mathrm{3D}})} \\mathrm{[dB]}{=}33.63+21.9\\log_{10}{\\left(d_{\\mathrm{3D}}\\right)}+20\\log_{10}{\\left(f_{\\mathrm{c}}\\right)}.\n\\end{equation}\nIn above equations, $d_{\\mathrm{3D}}$ and $f_{\\mathrm{c}}$ denote the $3$D distance between the device and \\gls{gnb}, and the center frequency, respectively. \nIn \\gls{inf-dh}, the \\gls{los} probability is described by \\cite{3GPP38901}\n\\begin{equation}\\label{eq: Pr_LOS}\n\\mathrm{Pr}_{\\mathrm{LOS}}{(d_{\\mathrm{2D}})} {=} \\exp\\left(\\frac{d_{\\mathrm{2D}}\\ln\\!{\\left(1-r_\\mathrm{clut}\\right)}\\left(h_\\mathrm{clut}-h_\\mathrm{device}\\right)}{d_{\\mathrm{clut}}\\left(h_\\mathrm{gNB}-h_\\mathrm{device}\\right)}\\right),\n\\end{equation}\nwhere $d_{\\mathrm{2D}}$ represents the ground distance between \\gls{gnb} and the device. 
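For reference, a minimal numerical sketch of \\eqref{eq:pathLoss}--\\eqref{eq: Pr_LOS} is given below; the clutter-related parameters are defined right after, their default values match our simulation settings, and, following \\cite{3GPP38901}, distances are in metres and the carrier frequency is in GHz.\n\\begin{verbatim}\nimport numpy as np\n\ndef pathloss_db(d3d, fc_ghz):\n    # LOS and NLOS path loss of Eq. (pathLoss)/(PL_InFDH) in dB.\n    pl_los = 31.84 + 21.5*np.log10(d3d) + 19.0*np.log10(fc_ghz)\n    pl_dh = 33.63 + 21.9*np.log10(d3d) + 20.0*np.log10(fc_ghz)\n    return pl_los, np.maximum(pl_los, pl_dh)\n\ndef prob_los(d2d, h_gnb=8.0, h_dev=1.5,\n             d_clut=2.0, h_clut=6.0, r_clut=0.6):\n    # LOS probability of Eq. (Pr_LOS) for the InF-DH scenario.\n    return np.exp(d2d * np.log(1.0 - r_clut) * (h_clut - h_dev)\n                  / (d_clut * (h_gnb - h_dev)))\n\\end{verbatim}\n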
Besides, $h_\\mathrm{gNB}$, $h_\\mathrm{device}$, $d_{\\mathrm{clut}}$, $h_{\\mathrm{clut}}$, and $r_{\\mathrm{clut}}$ denote the \\gls{gnb}'s antenna height, devices' antenna height, the typical clutter size, height and density, and are set in our simulations to $8$\\,m, $1.5$\\,m, $2$\\,m, $6$\\,m, and $60\\%$, respectively. The shadowing for \\gls{los} and \\gls{nlos} is assumed to follow a zero-mean log-normal distribution with standard deviation $4.3$ and $4$ in dB, respectively. In our link level simulations, we first set the position of the $4$ \\glspl{gnb}, as shown in \\figurename\\,\\ref{fig:simulationSetup}. Then, for each pair of possible device positions and the $4$ \\gls{gnb} positions, we generate uncorrelated link conditions with $\\mathrm{Pr}_{\\mathrm{LOS}}{(\\cdot)}$ for \\gls{los}, and ${1-\\mathrm{Pr}_{\\mathrm{LOS}}{(d_{\\mathrm{2D}})}}$ for \\gls{nlos}. Nevertheless, the large scale parameters are generated with correlation distance of $10$\\,m in the horizontal plane. Then, we followed the spatial consistency procedure in \\cite[\\S7.5,\\S7.6.3]{3GPP38901} to generate small scale parameters and channel coefficients, and used the parameters in \\cite[Table 7.5-6 Part-3]{3GPP38901}.\n\n\\begin{comment}\n \\begin{equation} \\label{eq:PL_shadow}\n PL \\mathrm{[dB]}{=}\\begin{cases}\n PL_{\\mathrm{LOS}}+X_\\sigma^{\\mathrm{LOS}}, & \\text{with $\\mathrm{Pr}_{\\mathrm{LOS}}$},\\\\\n PL_{\\mathrm{NLOS}}+X_\\sigma^{\\mathrm{NLOS}}, & \\text{with $1-\\mathrm{Pr}_{\\mathrm{LOS}}$},\n \\end{cases}\n \\end{equation}\n \n \\begin{equation}\\label{eq:PL_shadow2}\n PL \\mathrm{[dB]}{=}\\mathrm{Pr}_{\\mathrm{LOS}}{\\left(PL_{\\mathrm{LOS}}+X_\\sigma^{\\mathrm{LOS}}\\right)},\n \\end{equation}\n\n \\begin{equation}\\label{eq:beta}\n \\beta_c=\\sqrt{\\frac{P_c}{P_{\\mathrm{L}}X}}\n \\end{equation}\n\\end{comment}\n\\begin{figure}[t]\n\t\\centering\n\t\\includegraphics[width=.95\\columnwidth,keepaspectratio]{./Components/Figs/JSAC/sim.pdf}\n\n\t\\caption{The simulation setup.}\n\t\\label{fig:simulationSetup}\n\t\\vspace{-6mm}\n\\end{figure}\n\\subsection{Radio Network Simulator and \\gls{drl} Agent}\nThe radio network simulator is event-based, \\gls{3gpp} compliant, and operates at \\gls{ofdm} symbol resolution. We considered numerology one from \\cite{dahlman5GNr}, implying that each slot and symbol are $0.5$\\,ms and $33.33$\\,$\\mu$s long, respectively. We assumed the channel response matrix in \\eqref{eq:channel} remains constant during one slot.\nTo ensure seamless training of distributed \\gls{ai} until the end of a simulation, we considered \\gls{rlc} in \\gls{am} for distributed \\gls{ai}. Nevertheless, the \\gls{rlc} retransmissions are slow and unlikely to benefit \\gls{urllc} packets with their tight delay bounds \\cite{dahlman5GNr}. Accordingly, we configured the \\gls{rlc} in \\gls{um} for \\gls{urllc} flow. Within URLLC flow, \\gls{ul} and \\gls{dl} \\gls{urllc} traffic are scheduled based on round robin and delay, respectively (i.e., the packet that waited longer in the queue is scheduled first). Moreover, we used proportional-fair scheduling for distributed AI traffic in both directions. Nevertheless, we assumed strict priority scheduling where \\gls{urllc} flow has higher priority than \\gls{ai} flow, implying that \\gls{ai} packets cannot be scheduled unless there is no \\gls{urllc} packet in the queues.\n\nUpon transmission, one or several packets are drawn from the head of the corresponding \\gls{rlc} buffer, depending on the selected \\gls{mcs} on lower layers. 
Alternatively, the \\gls{rlc} layer can segment packets into smaller segments to fit them into the transport blocks via which the packets are transmitted. Upon reception, the instantaneous \\gls{sinr} of each transport block (which depends on the radio channel and the dynamic interference from other devices' transmissions) determines an error probability. Consequently, the receiving \\gls{rlc} entity reassembles successfully decoded segments and delivers them to the application layer. For the availability calculation on the application layer, we considered a \\gls{urllc} packet lost if it was not fully received before its corresponding delay bound, followed by applying $T_{\\mathrm{sv}}$ as in \\eqref{eq:availEst}, where, in the final availability distributions, $\\Delta t$ is the duration of one simulation, starting from the first action time (i.e., $t_1$) until $t_K$. \\tablename\\,\\ref{tab:simSetup} presents the simulation parameters.\n\nThe \\gls{urllc} traffic is represented by periodic \\gls{ul} and \\gls{dl} traffic, with delay bounds of $6$\\,ms and $4$\\,ms and packet sizes of $64$ bytes and $80$ bytes, respectively, both with a period of $6$\\,ms. Such \\gls{urllc} traffic characterization aligns with the machine control use case for mobile robots in \\cite{3GPP22104}.\nMotivated by \\cite{3GPP22874AiModel}, we assumed that the shared \\gls{dnn} architecture (i.e., used on the devices and the central node) follows MobileNets \\cite{mobileNets}, a class of efficient \\gls{dnn} models based on a streamlined architecture for mobile and embedded vision applications. We considered $0.25$\\,MobileNet-$224$ in \\cite{mobileNets}, implying that the \\gls{dnn} model has $0.5$ million parameters.\nTo model the distributed \\gls{ai} traffic, we assumed \\gls{fl} and $32$-bit quantization for each model parameter, implying that each model (local or global) can be represented as a packet of size $2$\\,MB. Nevertheless, our solution applies to settings where other quantization/compression approaches reduce the communication overhead \\cite{Sindri2020Linear}.\n\n\\begin{table}[t]\n\\centering\n\\caption{Simulation Parameters.}\n\\label{tab:simSetup}\n\\scalebox{0.75}{\\begin{tabular}{l||l}\n\t\\hline\n\t\\multicolumn{2}{c}{\\textbf{Radio Network Simulator Parameters}} \\\\\n\t\\hline\n\t\\textbf{Parameter}& \\textbf{Value}\\\\\n\t\\hline\n\tDeployment & $4$ \\glspl{gnb}\\\\\n\tDuplex/Carrier frequency & FDD/$2.6$\\,GHz\\\\\n\t\\gls{gnb} antenna height& $8$\\,m \\\\\n\tDevices' height &$1.5$\\,m \\\\\n\tNumber of antenna elements in \\gls{gnb}/device & $2$/$2$\\\\ \n\tBandwidth& $40$\\,MHz\\,\\\\\n\tTTI length/Subcarrier spacing& $0.5$\\,ms/$30$\\,kHz \\\\\n\tUL/DL transmit power & $0.2$\\,W/$0.5$\\,W \\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{urllc}\\,Trans. (\\gls{mac}) & $3/2$ \\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{ai}\\,Trans. (\\gls{mac}) & $10/10$\\\\\n\tMax\\,num\\,of\\,\\gls{ul}/\\gls{dl}\\,\\gls{ai}\\,Trans. 
(\\gls{rlc}) & $8/8$ \\\\\n\t\\gls{ul}/\\gls{dl}\\,\\gls{urllc}\\,delay bound& $6/4$\\,ms \\\\\n\t\\gls{ul}/\\gls{dl} \\gls{urllc} survival time, $T_{\\mathrm{sv}}$& $6/6$\\,ms \\\\\n\tThe total number of \\gls{ai} devices, $N$& $50$ \\\\\n\tThe required number of local models for \\eqref{eq:globalUp} to progress, $n$ & $15$\\\\\n\t$\\upsilon$/$\\zeta$ in the reward function \\eqref{eq:reward} & $0.5$/$100$\\\\\n $T^{\\max}$& $10$\\,s \\\\\n\t\\hline\n\t\\multicolumn{2}{c}{\\textbf{DRL Agent Parameters}} \\\\\n\t\\hline\n\t\\textbf{Parameter}& \\textbf{Value}\\\\\n\t\\hline\n\tDiscount factor, $\\lambda$ & $0.1$\\\\\n\tTraining mini-batch size, $|\\mathcal{B}_\\mathrm{mb}|$ & $200$\\\\\n\tReplay buffer size & $1\\,000\\,000$\\\\\n\tNeural network hidden layers (all six) & $128\\times128$\\\\\n\tPrioritized replay buffer $\\alpha$/$\\beta$ in \\cite{prioReplay} & $0.6$/$0.4$\\\\\n\tLearning rate (for critic, actor and entropy) & $0.0003$ \\\\%(for critic, actor and entropy)\n\t$\\nu$ (for smooth update) & $0.002$\\\\% (for both target actor and critic)\n\t\\hline\n\\end{tabular}}\n\\end{table}\n\nIn our simulation setup, the \\gls{drl} agent resides in a separate server and communicates with the radio network simulator via a ZeroMQ interface. We ran the \\gls{drl} agent on a server with Intel(R) Xeon(R) Gold 6132 CPU @ 2.60\\,GHz, $8$ cores and $64$\\,GB of RAM. In the exploration phase, we trained the \\gls{drl} agent for $7\\,000$ episodes of $50$-iteration length, and our simulation time differed depending on how long each iteration (i.e., $d_k^{\\mathrm{AI}}(\\cdot)$) took. Nevertheless, our \\gls{sac}-based algorithm converged with significantly fewer iterations at around $150\\,000$ iterations. The simulation parameters are given in \\tablename\\,\\ref{tab:simSetup}.\n\nIn the following section, we run comprehensive simulations to study the impact of various design parameters including the distributed \\gls{ai} and \\gls{urllc} load, the number of selected \\gls{ai} devices, and slicing the network resources between \\gls{urllc} and distributed \\gls{ai} services.\n\\section{Results and Discussion}\\label{sec:preformance}\n\\begin{figure*}[t]\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/availUlDl}\n \t\\caption{}\n \\label{fig:availUrllc}\n \\end{subfigure}\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/delayBox}\n \t\\caption{}\n \\label{fig:delayBox}\n \\end{subfigure}\n\t\\caption{The empirical CDF of URLLC devices' availability, $\\hat{\\alpha}_i^\\Gamma$, in (a), and the distributed \\gls{ai} training delay, $d_k^{\\mathrm{AI}}$ in (b), both for the benchmark with semi-random \\gls{urllc} devices. Each box plot represents the minimum, $25$th percentile, median, $75$th percentile, and maximum of the training delay distribution.\n\t\\label{fig:kpi}\n\\end{figure*}\nFor the performance evaluation of our \\gls{sac}-based algorithm (shown as \\texttt{dRlAgent} in the figures), we set up two benchmarks.\n\\begin{enumerate}\n \\item Semi-random \\gls{urllc} devices: In this benchmark, we set up $10$ \\gls{urllc} devices, and assume that each device moves in 1D at a speed of $30$\\,km/h within a short distance of a position that is maintained in different simulations. Yet, the movement direction changes randomly in different seeds.\n \\item Random \\gls{urllc} devices: In this benchmark, we set up $20$ \\gls{urllc} devices. 
At each simulation, the \\gls{urllc} devices appear at random positions and move in 1D in a random direction at a speed of $30$\\,km/h within a short distance of that position.\n\\end{enumerate}\nFor each of these benchmarks, we compared our proposed solution with three types of baselines: \n\\begin{itemize}\n \\item \\texttt{singleURLLC}: We did not set any distributed AI traffic in our industrial automation scenario and the \\gls{urllc} devices leveraged the entire $40$\\,MHz bandwidth.\n \\item \\mixs{m}: In addition to \\gls{urllc} devices, we had a total of $50$ \\gls{ai} devices. We kept the required number of devices that the central node waits for constant (i.e., $n{=}15$), and randomly picked a set of $m$ participating devices (i.e., $m_k=m, \\forall k\\in\\mathbb{N}$), where $m\\in\\{15,20,30,40,50\\}$.\n \\item \\texttt{slicing[$m$]}: We assigned $25\\%$ of resources to the \\gls{urllc} service (i.e., $10$\\,MHz bandwidth and $0.125$\\,W for total \\gls{dl} transmit power), and the rest to the distributed \\gls{ai} service. We kept $|\\mathcal{U}|$, $|\\mathcal{N}|$, $n$ the same as \\mixs{m} baseline. Besides, we randomly picked a set of $m$ participating devices (i.e., $m_k=m, \\forall k\\in\\mathbb{N}$), where $m\\in\\{15,20\\}$.\n\\end{itemize}\nThe \\texttt{singleURLLC} baseline represents the best possible performance on the \\gls{urllc} availability in our scenario. \nIn \\texttt{dRlAgent}, and to calculate the reward in \\eqref{eq:reward}, we set $\\upsilon$ and $\\zeta$ to $0.5$, and $100$, respectively.\nBesides, we assumed that all \\gls{urllc} devices serve a single use case, and thus, set the availability requirement to $0.99$ (i.e., $\\alpha^\\mathrm{req}=\\alpha_i^\\mathrm{req}=0.99, \\forall i\\in\\mathcal{U}$). \nFor \\texttt{singleURLLC} evaluations, we ran $102$-second simulations $300$ times (with different seeds). In addition, \\texttt{dRlAgent}, \\mixs{m}, and \\texttt{slicing[$m$]} were evaluated with $300$ simulations of $50$-iteration length, resulting in different simulation lengths. Note that there is no progress in distributed AI if all $n$ local models are not collected by the central node within a time duration of $T^{\\max}$, and thus, \\gls{sac} iterations could differ from distributed AI iterations. Regardless, if there is no strict latency constraint from distributed \\gls{ai} task, we can tune $T^{\\max}$ sufficiently large to ensure that time-outs happen rarely. Although our radio network simulator can handle such situations, and for the sake of fair comparison, we set $T^{\\max}{=}10$\\,s. \n\n\\subsection{Semi-random URLLC Devices}\n\\figurename\\,\\ref{fig:kpi} shows the distribution of our main \\glspl{kpi}. \\figurename\\,\\ref{fig:availUrllc} illustrates the empirical \\gls{cdf} of \\gls{urllc} devices' availability, where each sample is the \\gls{ul} or \\gls{dl} availability of one \\gls{urllc} device in one simulation during its whole simulation time. In \\texttt{slicing[$m$]}, due to slicing of the bandwidth and \\gls{gnb} transmission power, the availability distribution is identical for any arbitrary $m$.\nAs this figure shows, compared to \\texttt{singleURLLC} and \\texttt{slicing[$m$]}, the availability of the \\gls{urllc} devices decreases in \\mixs{15} and \\mixs{20}, likely, because of the introduced interference by AI devices in the neighboring cells. 
Although the scheduler adjusts the \\gls{mcs} to deal with this additional interference\\footnote{Note that such decrease in availability occurs regardless of the scheduler configuration. For example, higher target block error rate cannot overcome the extra interference, and lower target block error rate leads to extra delay, both resulting in lower availability.}, it still affects the availability, such that the availability requirement of $0.99$ can be met with sensitivity of around $0.1$ in \\mixs{15} and \\mixs{20}, rather than $0.012$ in \\texttt{singleURLLC} and $0.014$ in \\texttt{slicing[$m$]} (see \\eqref{eq:availReq}). In \\mixs{m} baselines, most of the availability samples are still greater than or equal to $0.98$. Unlike many conventional services, such decrease is not acceptable for \\gls{urllc} service.\nDespite the impact of introducing the large load of the distributed \\gls{ai} service, we observe that our \\texttt{dRlAgent} solution keeps the \\gls{urllc} devices' availability close to the \\texttt{singleURLLC} and \\texttt{slicing[$m$]} up to $\\alpha^\\mathrm{req}$, and can support the availability requirement of $0.99$ with sensitivity of $0.013$.\n\n\\begin{figure*}[t]\n \\begin{subfigure}{0.49\\textwidth}\n\t\\centering\n \t\\input{Components/Figs/JSAC/numberOfSelDev_PMF}\n \t\\caption{}\n \\label{fig:PMF}\n \\end{subfigure}\n \\begin{subfigure}{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/deviceParticRatio}\n \t\\caption{}\n \\label{fig:selRatio}\n \\end{subfigure}\n\t\\caption{The empirical probability mass function of the number of (a) selected devices, and (b) \\gls{ai} device participation ratio, both for \\texttt{dRlAgent}.}\n\t\\label{fig:dRlAgentPerf}\n\\end{figure*}\n\n\\figurename\\,\\ref{fig:delayBox} depicts the distributed \\gls{ai} training delay. Each box shows the minimum, $25$th percentile, median, $75$th percentile, and the maximum of the observed training delay samples. In general, as \\texttt{slicing[$m$]} and \\mixs{m} boxes, and our results in \\cite{ganjInterplay} suggest, the training delay grows as the number of selected devices increases. However, it is more likely for central node to wait excessively for stragglers, and thus reach time-out when $m{=}n$ (i.e., in both \\texttt{slicing[$15$]} and \\mixs{15}). Moreover, the lower training delay statistics in \\mixs{15} and \\mixs{20} than \\texttt{slicing[$15$]} and \\texttt{slicing[$20$]}, respectively, suggests that distributed \\gls{ai} service in \\mixs{m} generally consumes more resources than the allocated resources in \\texttt{slicing[$m$]}.\nAs this figure indicates, compared to the most competitive baseline (i.e., \\mixs{15}), our \\texttt{dRlAgent} decreases the median training delay by $36\\%$, while the maximum observed training delay is $2.6$\\,s, i.e., $43\\%$ less than \\mixs{20}, which has the lowest maximum observed training delay among the baselines.\n\n\\figurename\\,\\ref{fig:dRlAgentPerf} demonstrates the device selection policy in the evaluation phase, $\\bm{\\pi}_k^\\mathrm{u}$, for \\texttt{dRlAgent}. \\figurename\\,\\ref{fig:PMF} shows the empirical \\gls{pmf} of the number of selected \\gls{ai} devices, $m$ for different iterations. As this figure indicates, given that we set $n{=}15$ in all of our evaluations, our \\gls{sac}-based solution selected at least $1$ extra \\gls{ai} device for more than $40\\%$ of iterations. 
Such selection of extra devices implies that our device selection solution could still leverage the diversity introduced by extra \\gls{ai} devices, even in our bandwidth-limited deployment. Carefully selected extra devices reduce the sensitivity to the straggler problem and therefore reduce the overall latency without substantial impact on the interference footprint. \n\\figurename\\,\\ref{fig:selRatio} represents the device selection ratio for each \\gls{ai} device that is the number of times an \\gls{ai} device is selected by the \\gls{drl} agent divided by the total number of iterations in the evaluation phase. According to \\figurename\\,\\ref{fig:selRatio}, $9$ \\gls{ai} devices are selected in more than $90\\%$ of the iterations. Also, $24\\%$ of the \\gls{ai} devices are selected with a ratio of $0.1$ to $0.9$. \n\n\\subsection{Random URLLC Devices}\n\\figurename\\,\\ref{fig:kpiRand} illustrates the distribution of availability for \\gls{urllc} devices (in \\figurename\\,\\ref{fig:availUrllcRand}) and training delay of distributed \\gls{ai} service (in \\figurename\\,\\ref{fig:delayBoxRand}). As \\figurename\\,\\ref{fig:availUrllcRand} shows, up to $\\alpha^{\\mathrm{req}}$, \\texttt{dRlAgent} keeps the availability of \\gls{ai} devices close to \\texttt{singleURLLC}, even though the \\gls{urllc} traffic appears at random locations in different seeds.\nSurprisingly, \\texttt{slicing[$m$]} can support $0.99$ availability requirement with $\\gamma{=}0.02$, which is more than twice the sensitivity \\texttt{singleURLLC} and \\texttt{dRlAgent} can support. Compared to the semi-random benchmark, in this benchmark, the number of \\gls{urllc} devices is doubled and they can appear in any part of the factory. Hence, \\texttt{slicing[$m$]} might associate a large number of devices to the same \\gls{gnb}, contributing to lower availability performance with $10$\\,MHz bandwidth and $0.125$\\,W maximum \\gls{dl} transmission power for \\gls{urllc} devices. In \\figurename\\,\\ref{fig:delayBoxRand}, as expected, the delay distribution of \\texttt{slicing[$m$]}, for $m{\\in}\\{10,15\\}$, does not show any significant difference with \\figurename\\,\\ref{fig:delayBox}, and we observe a slight raise in overall training delay distribution of \\mixs{m} for $m{\\in}\\{10,15\\}$ (in the order of tens of milliseconds).\nNevertheless, our \\texttt{dRlAgent} improves the median training delay at least by $30\\%$ (compared to \\mixs{15}), and decreases the maximum observed training delay at least by $15\\%$ (compared to \\mixs{20}). Therefore, even in fully-random \\gls{urllc} devices benchmark, our \\texttt{dRlAgent} successfully orchestrates the distributed \\gls{ai} traffic such that i) the impact on the availability of the \\gls{urllc} devices, given their requirement, is negligible, and ii) it reaches lower training delay statistics.\n \n\\begin{figure*}[t]\n \\begin{subfigure}[t]{0.49\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/availUlDlRand}\n \t\\caption{}\n \\label{fig:availUrllcRand}\n \\end{subfigure}\n \n \\begin{subfigure}[t]{0.48\\textwidth}\n\t \\centering\n \t\\input{Components/Figs/JSAC/delayBoxRand}\n \t\\caption{}\n \\label{fig:delayBoxRand}\n \\end{subfigure}\n\t\\caption{The empirical CDF of URLLC devices' availability, $\\hat{\\alpha}_i^\\Gamma$, in (a), and the distributed \\gls{ai} training delay, $d_k^{\\mathrm{AI}}$ in (b), both for the benchmark with random \\gls{urllc} devices. 
Each box plot represents the minimum, $25$th percentile, median, $75$th percentile, and maximum of the observed training delay samples.\n\t\\label{fig:kpiRand}\n\\end{figure*}\n\\section{Conclusions}\\label{sec:conculsions}\nIn this paper, we investigated the performance optimization of distributed \\gls{ai} when it coexists with the \\gls{urllc} service with stringent operational requirements. We proposed a \\gls{drl}-powered framework to run distributed AI using a carefully selected subset of devices with the objective of minimizing the AI training delay while maintaining the \\gls{urllc} communication service availability requirements. Our comprehensive \\gls{3gpp}-compliant 5G simulations indicate that our scheme can significantly decrease the total training delay while keeping URLLC devices' availability near the single service scenario (i.e., when all network resources are allocated to the URLLC devices). This paper provides useful insights on how to adaptively control \\gls{ai} traffic (via device selection) to ensure a sustainable coexistence between distributed \\gls{ai} and \\gls{urllc}.\n\nAn alternative approach to control the load of distributed \\gls{ai} is through quantization of the exchanged messages. These approaches often reduce the communication overhead per iteration (and thereby interference footprint) at the expense of some extra iterations to reach convergence. Potential future work is to develop novel approaches that adaptively change the quantization level based on not only the distributed AI algorithm but also the load of the URLLC and the interference footprint of the network.\n{\\appendices\n\\section{Example \\ref{ex1} Show case} \\label{app:1}\nTowards solving the distributed optimization problem \\eqref{eq: MainOptimProblem}, the central node performs the gradient descent update at each iteration $k$ as\n\\begin{equation}\\label{eq:algUpdate}\n\\mbox {\\boldmath $w$}_{k+1} = \\mbox {\\boldmath $w$}_k - \\frac{\\eta_k}{n}\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)},\n\\end{equation}\nwhere the right hand side of the equation represents $A{(\\cdot)}$ in \\eqref{eq:globalUp}, $\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}$ is the true gradient's noisy estimation at the $i$th \\gls{ai} device, and $\\mathcal{N}_{n,k}$ is the set of \\gls{ai} devices from which central node received the first $n$ local updates at the $k$th iteration. We assume that each \\gls{ai} device employs mini-batch gradient descent, and to simplify our notation, the size of the mini-batches are assumed the same for all devices in all iterations. Hence, the overall gradient estimate using the local estimates of the \\gls{ai} devices can be obtained as\n\\begin{equation}\\label{eq:noisyGradient}\n\\frac{1}{n}\\!\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)} \\coloneq \\nabla f{(\\mbox {\\boldmath $w$}_k)}+\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)},\n\\end{equation}\nwhere $\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)} \\coloneq \\frac{1}{n}\\!\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\mbox {\\boldmath $e$}_{i,k}$, and $\\mbox {\\boldmath $e$}_{i,k}$ is the residual term of the $i$th device's estimate at the $k$th iteration, while $\\nabla f{(\\mbox {\\boldmath $w$}_k)}$ is the true gradient (i.e., the gradient of the batch gradient descent on centralized training). 
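A minimal sketch of the $n$-sync update in \\eqref{eq:algUpdate}, assuming the noisy local gradients are provided in order of arrival at the central node (all names are illustrative), is:\n\\begin{verbatim}\nimport numpy as np\n\ndef nsync_gradient_step(w, local_grads, eta, n):\n    # Average the first n received noisy local gradients,\n    # cf. Eq. (noisyGradient), and take one descent step,\n    # cf. Eq. (algUpdate).\n    g_bar = np.mean(local_grads[:n], axis=0)\n    return w - eta * g_bar\n\\end{verbatim}\n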
Let us make the following assumptions.\n\\begin{assumption}\\label{as:1}\n\\cite{Bottou2018SIAM,flConvergNoniid} The objective functions $f_i$, $\\forall i\\in\\mathcal{N}$, are all $L$-smooth, with Lipschitz constant $L>0$.\n\\end{assumption}\n\n\\begin{assumption}\\label{as:2}\n\\cite{Bottou2018SIAM,flConvergNoniid} The objective functions $f_i$, $\\forall i\\in\\mathcal{N}$, are all strongly convex, with constant $\\mu>0$.\n\\end{assumption}\n\n\\begin{assumption}\\label{as:3}\n\\cite{Bottou2018SIAM} There exist $\\beta_2\\geq (\\beta_1+1)^2 > 0$ that, for $\\forall k\\in \\left[K\\right]$ and $\\forall i\\in \\mathcal{N}$, the objective function $f(\\mbox {\\boldmath $w$})$ and the \\gls{dgd} algorithm have the following limits:\n\\begin{subequations}\\label{eq:momentLimits}\n\\begin{align}\n & \\nabla f{(\\mbox {\\boldmath $w$}_k)}^\\intercal \\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]}\\geq \\beta_1 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2 , \\label{eq:subspace}\\\\\n & \\nabla f{(\\mbox {\\boldmath $w$}_k)}^\\intercal \\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]} \\leq \\beta_2 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2. \\label{eq:upperBoundLim}\n\\end{align}\n\\end{subequations}\nIt is worth mentioning that \\eqref{eq:subspace} implies that the noisy estimation of the gradient is on the same half space with the true gradient, and \\eqref{eq:upperBoundLim} is a weaker assumption of the bounded variance of $\\sum_{i\\in\\mathcal{N}_{n,k}}\\!\\!\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}/n$, and only bounds it by the actual gradient, $\\nabla f(\\mbox {\\boldmath $w$}_k)$.\n\\end{assumption}\n\\begin{assumption}\\label{as:4}\n\\cite{Bottou2018SIAM,flConvergNoniid} The variance of the gradient norms in each device is bounded, i.e.,\n\\begin{equation}\\label{eq:sigma}\n \\mathbb{E}{\\left[\\norm{\\mbox {\\boldmath $e$}_{i,k}}_2^2\\right]}\\leq \\sigma^2, \\forall k\\in \\left[K\\right], \\forall i\\in \\mathcal{N}.\n\\end{equation}\nSince, $\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)}$ is an unbiased estimator of $\\mathbb{E}{\\left[\\mbox {\\boldmath $e$}_{i,k}\\right]}$, we have\n\\begin{equation}\n \\mathbb{E}{\\left[\\norm{\\Bar{\\mbox {\\boldmath $e$}}_k^{(n)}}_2^2\\right]}\\leq \\frac{\\sigma^2}{n}, \\forall k\\in \\left[K\\right].\n\\end{equation}\nIt is worth noting that \\eqref{eq:upperBoundLim} and \\eqref{eq:sigma} results in \n\\begin{equation}\\label{eq:sgdVarComb}\n \\mathbb{E}{\\left[\\norm{\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]}\\leq\\sigma^2+\\beta_2 \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2.\n\\end{equation}\n\\end{assumption}\nThen, if Assumptions~\\ref{as:1}-\\ref{as:4} hold, for a fixed learning rate $\\eta$ that is $0<\\eta\\leq\\frac{\\beta_1+1}{(2\\beta_2+1)L}$, we have \\cite[Theorem 4.6]{Bottou2018SIAM}\n\\begin{multline}\\label{eq:convexConverge}\n\\mathbb{E}{\\left[f{\\left(\\mbox {\\boldmath $w$}_k\\right)} {-} f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}\\right]} \\leq \\frac{\\eta L \\sigma^2}{2n\\beta_1 \\mu} +\\\\ \n{\\left(1{-}\\eta \\beta_1 \\mu\\right)^{k-1}}{\\left(f{\\left(\\mbox {\\boldmath $w$}_1\\right)}{-}f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}{-}\\frac{\\eta L\\sigma^2}{2n\\beta_1 \\mu}\\right)},\n\\end{multline}\nwhere the first term represents the gap to the expected optimal value that \\gls{dgd} converges to when $k{\\to} \\infty$ for a fixed learning rate, and the second term is the convergence rate. 
Using the learning rate bound and Assumption \\ref{as:3}, and the fact that $\\mu {\\leq} L$ (as a result of Assumptions \\ref{as:1} and \\ref{as:2}), we can derive that $\\eta \\beta_1 \\mu < 1$, and hence, $(1-\\eta \\beta_1 \\mu)$ is a contraction factor.\n\nLet us assume that our initial point is within a bounded region around the point to which we can converge (i.e., the last parenthesis in \\eqref{eq:convexConverge} is less than or equal to $W^{\\mathrm{A}}$). Note that the additional term of $\\eta L \\sigma^2/{2n\\beta_1 \\mu}$ reflects that \\gls{dgd} cannot converge to the optimal value, but instead, to a neighborhood of $f(\\mbox {\\boldmath $w$}^\\star)$.\nThen, the minimum required number of iterations, $K_{\\min}$, to reach $\\epsilon$-accuracy becomes\n\\begin{align}\\label{eq:kConvex}\nK_{\\min} \\geq \\log_{{\\left(1{-}\\eta \\beta_1 \\mu\\right)}}&{\\left(\\epsilon{-}\\frac{\\eta L\\sigma^2}{2n\\beta_1 \\mu}\\right)} - \\log_{{\\left(1{-}\\eta \\beta_1 \\mu\\right)}}\\!\\!{\\left(W^{\\mathrm{A}}\\right)} +1.\n\\end{align}\nThen, \\eqref{eq:kConvex} can be simplified as\n\\begin{equation}\\label{eq:kConvexSimpleApp}\nK_{\\min} \\geq \\log_{b}{\\left(\\frac{W^{\\mathrm{A}}}{\\epsilon{-}\\frac{z^{\\mathrm{A}}}{n}}\\right)} +1,\n\\end{equation}\nwhere $b\\coloneq1/\\left(1{-}\\eta \\beta_1 \\mu\\right)>1$, and $z^{\\mathrm{A}}$ is a positive constant which depends on the learning rate, Lipschitz constant, strong convexity, and the error in the gradient estimates for $n{=}1$.\n\n\\begin{comment}\n \\begin{equation} \\label{eq:kprop}\n K\\propto \\log{\\left(\\epsilon-\\frac{z}{n}\\right)}. \n \\end{equation}\n\\end{comment}\n\\section{Example \\ref{ex2} Show case}\\label{app:2}\n\\begin{assumption}\\label{as:5}\n\\cite{Bottou2018SIAM} The objective functions $f_i$, $\\forall i \\in \\mathcal{N}$, are lower bounded by a scalar $f_{\\mathrm{inf}}$ for all sequences of $\\mbox {\\boldmath $w$}_k$.\n\\end{assumption}\nThe non-convex objective functions may contain several local minima and other stationary points. Therefore, we define the convergence criterion on the gradient.\nThen, if Assumptions~\\ref{as:1}, \\ref{as:3}-\\ref{as:5} hold, for a fixed learning rate satisfying $0<\\eta\\leq \\frac{\\beta_1+1}{L(2\\beta_2+1)}$, we have \\cite[Theorem 4.8]{Bottou2018SIAM}\n\\begin{equation}\\label{eq:nonConvexIneq}\n\\mathbb{E}{\\left[\\frac{1}{K}\\!\\sum_{k=1}^K \\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]} \\leq \\frac{\\eta L \\sigma^2}{n{(\\beta_1{+}1)}} {+} \\frac{2\\left(f{(\\mbox {\\boldmath $w$}_1)}{-}f_{\\mathrm{inf}}\\right)}{\\eta{(\\beta_1{+}1)}K}.\n\\end{equation}\nTo understand \\eqref{eq:nonConvexIneq}, consider centralized training with batch gradient descent, where there is no gradient noise and $\\sigma^2$ becomes zero, resulting in $\\norm{\\nabla f{(\\mbox {\\boldmath $w$}_k)}}_2\\to0$ as $K$ grows. However, in \\gls{dgd}, the average norm of the gradients converges to ${\\eta L \\sigma^2}/{n(\\beta_1{+}1)}$.\nNow, the required number of iterations, $K_{\\min}$, to reach $\\epsilon$-accuracy becomes\n\\begin{equation}\\label{eq:nonConvexKmin}\nK_{\\min}\\geq \\frac{2\\left(f{(\\mbox {\\boldmath $w$}_1)}-f_{\\mathrm{inf}}\\right)}{\\eta{(\\beta_1{+}1)}\\left(\\epsilon{-}\\frac{\\eta L \\sigma^2}{n\\left(\\beta_1{+}1\\right)}\\right)}.\n\\end{equation}\nIn \\eqref{eq:nonConvexKmin}, we observe that tightening the convergence criterion (i.e., decreasing $\\epsilon$ toward ${\\eta L \\sigma^2}/{n(\\beta_1{+}1)}$) leads to a higher required number of iterations to reach $\\epsilon$-accuracy. 
We can simplify \\eqref{eq:nonConvexKmin} as\n\\begin{equation}\\label{eq:kNonConvexSimpleApp}\nK_{\\min}\\geq \\frac{W^\\mathrm{B}}{\\epsilon{-}\\frac{z^\\mathrm{B}}{n}},\n\\end{equation}\nwhere $W^\\mathrm{B} {\\coloneq} 2\\left(f{(\\mbox {\\boldmath $w$}_1)}{-}f_{\\mathrm{inf}}\\right)/\\eta{(\\beta_1{+}1)}$, and $z^\\mathrm{B}$ is a function of the learning rate, Lipschitz constant, and error in the gradient estimates when $n{=}1$. Note that $\\epsilon$ should be set to a value that is larger than the neighborhood \\gls{dgd} can potentially converge to (i.e., $z^\\mathrm{B}$).\n\\section{Example \\ref{ex3} Show case}\\label{app:3}\nThere are two main differences between \\gls{fl} and \\gls{dgd}: \ni) there could be several local iterations in each \\gls{ai} device between two communications, \nand ii) the model parameters (i.e., the weights of the \\glspl{dnn}) are communicated, rather than the gradients as in \\gls{dgd}. Hence, on the local update, each \\gls{ai} device performs \\eqref{eq:algUpdate} $E$ times before updating the global iteration $k$, as \\eqref{eq:localUp}. \n\n\\begin{assumption}\\label{as:6}\n\\cite{flConvergNoniid} The variance of the gradient estimates in each \\gls{ai} device is bounded, i.e.,\n\\begin{equation}\\label{eq:boundedGradient}\n \\mathbb{E}{\\left[\\norm{\\widehat{\\nabla}f_i{(\\mbox {\\boldmath $w$}_k)}}_2^2\\right]}\\leq G^2.\n\\end{equation}\nNote that \\eqref{eq:boundedGradient} is a stricter assumption than Assumptions~\\ref{as:3} and \\ref{as:4} combined, as shown in \\eqref{eq:sgdVarComb}.\n\\end{assumption}\n\nIf Assumptions~\\ref{as:1}, \\ref{as:2}, \\ref{as:4}, and \\ref{as:6} hold, and $n$ \\gls{ai} devices are selected uniformly at each iteration, then for a diminishing learning rate $\\eta_k = 2/\\mu{(\\xi+k+\\kappa)}$, where $\\kappa({\\in} {[E{-}1]})$ is the local iteration number and $\\xi \\coloneq \\max\\left\\{8L/\\mu,E\\right\\}$, the following inequality holds \\cite[Theorem 3]{flConvergNoniid}:\n\\begin{equation}\\label{eq:flConvergence}\n\\mathbb{E}{\\left[f{\\left(\\mbox {\\boldmath $w$}_k\\right)} {-} f{\\left(\\mbox {\\boldmath $w$}^\\star\\right)}\\right]}\\leq \\frac{2L{\\left(\\frac{\\sigma^2}{N}{+}8{(E{-}1)^2}{+} \\rho E^2G^2{+}\\xi G^2\\right)}}{\\mu^2{(\\xi{+}k{+}\\kappa{-}1)}},\n\\end{equation}\nwhere $\\rho\\coloneq\\frac{4{(N-n)}}{n{(N-1)}}$.
Hence, the minimum number of global iterations (i.e., rounds of communications) to attain $\\epsilon$-accuracy approximately becomes \\cite{flConvergNoniid}\n\\begin{equation}\\label{eq:flEpsAccuracyApp}\nK_{\\min} \\propto \\frac{1}{\\epsilon}\\left[\\left(1+\\frac{1}{n}\\right)EG^2 + \\frac{\\frac{\\sigma^2}{N}+G^2}{E}+G^2\\right],\n\\end{equation}\nwhere we assumed $\\xi=\\mathcal{O}{(1+E)}$.\n}\n\\ifCLASSOPTIONcaptionsoff\n\\fi\n\n\n\\Urlmuskip=0mu plus 1mu\\relax\n\\bibliographystyle{IEEEtran}\n\\vspace{-2mm}\n\n\n\n\n\n\n\n\n\n\\section*{Reviewer \\thereviewer}}\n\n\\newenvironment{point}\n {\\refstepcounter{point} \\bigskip \\noindent {\\textbf{Comment~\\thepoint} }\\ \\par\\itshape}\n\t{\\par}\n\n\n\\definecolor{mycolor1}{rgb}{0.29, 0.59, 0.82}%\n\\definecolor{mycolor2}{rgb}{1.0, 0.4, 0.6}\n\\definecolor{mycolor3}{rgb}{0.92900,0.69400,0.12500}%\n\\definecolor{mycolor4}{rgb}{0.71,0.49,0.86}\n\\definecolor{mycolor5}{rgb}{0.12, 0.3, 0.17}\n\\definecolor{mycolor6}{rgb}{0.43, 0.21, 0.1}\n\\definecolor{mycolor7}{rgb}{0.52, 0.73, 0.4}\n\\definecolor{mycolor8}{rgb}{0.98, 0.38, 0.5}\n\\definecolor{mycolor9}{rgb}{0.85, 0.44, 0.84}\n\\definecolor{vColor}{rgb}{0.12, 0.3, 0.17}\n\t\n\n"} +{"id": "red-arxiv-15", "source_id": "red-arxiv_15_red-arxiv-15", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction}\nSemiconducting transition metal dichalcogenides (TMDCs), such as tungsten diselenide (WSe$_{2}$), belong to a family of two-dimensional (2D) materials that exhibit fascinating electronic and optical properties. Due to interlayer coupling, bulk and multi-layered molybdenum- and tungsten-based TMDCs are indirect gap semiconductors, while their monolayers exhibit a crossover to a direct band gap. The strong spin-orbit interactions in TMDCs lead to spin-splitting of few hundred meV in the valence band and few to tens of meV in the conduction band, making TMDCs of interest for potential valleytronics applications.\\cite{Wang2018review} The promising versatility of engineering with TMDCs is rooted in the flexibility of being artificially fabricated into van der Waals homo- or heterostructures. With the recent interest in \\textit{magic-angle} twisted bilayer graphene for the appearance of flat bands \\cite{Cao2018}, moir\\'{e} superlattices of TMDCs naturally become a viable candidate for seeking similar phenomena \\cite{Jin2019nat}. To date, in twisted homobilayer WSe$_{2}$ alone, evidence of low-energy flat bands \\cite{Zhang2020flatbands} that could support emergent electronic phases for a continuum of low twist angles, such as superconductivity \\cite{An2020} and correlated insulator states \\cite{Wang2020} have been reported. Modification of the layer stacking as a function of twist alters the local atomic registry. As such, the interlayer coupling strength in homostructures has demonstrated to also be sensitive to twist angle in the case of bilayer MoS$_{2}$ \\cite{Castellanos-Gomez2014} and WS$_{2}$ \\cite{Yan2019}. 
Recent reports on domain formation caused by atomic reconstruction as a result of interlayer interactions of TMDC moir\\'{e} superlattices with very low (near 0$^{\\circ}$) twist angles in bilayer MoSe$_{2}$\\cite{Sung2020}, WS$_{2}$ and WSe$_{2}$\\cite{Weston2020} and double bilayer WSe$_{2}$\\cite{An2020} have opened yet another avenue towards novel electronic and excitonic properties.\n\nA major obstacle in the field involves correlating the optical response with imaging of the local structure of a nanometric moir\\'{e} lattice, where efforts have turned towards various electron microscopies and scanning probe microscopies \\cite{Andersen2021,McGilly2020,Weston2020}. The latest generation of electron monochromators with energy resolution of 10 meV and below has enabled the measurement of phonons in hexagonal boron nitride (\\textit{h}-BN) and excitonic absorption in various monolayer TMDCs \\cite{Tizei2015} in a transmission electron microscope with subwavelength spatial resolution. Aside from offering an exceptional combination of spatial and energy resolution, the high-energy incident electrons in electron energy-loss spectroscopy (EELS) have facile access to high-energy excitations of a few eV and beyond, unlike limitations met by optical techniques.\n\nIn this work, the combined high spatial and spectral resolution of aberration-corrected scanning transmission electron microscopy (STEM) and monochromated EELS in the low-loss regime were used to investigate the optical excitation response of atomically-thin WSe$_{2}$, specifically in twisted bilayer WSe$_{2}$ covering a large range of moir\\'{e} angles. Relevant characteristics of the local atomic structure were also obtained within the same platform, including the twist angle and layer stacking. Furthermore, first-principle calculations of the electronic structure modifications in twisted bilayers and expected optical response relative to monolayers and zero-twist bilayers were used to interpret the changes in high-energy spectral features measured in EELS.\n\n\\section{Results}\nAtomically-thin WSe$_{2}$ flakes have been mechanically exfoliated from a bulk synthetically-grown crystal \\cite{Tonndorf2013}, and transferred onto a carbon-coated Si$_{3}$N$_{4}$ TEM grid with periodic 1 $\\mu$m-sized holes. An optical microscopy reflectance image of exfoliated WSe$_{2}$ transferred onto the holey Si$_{3}$N$_{4}$ TEM grids with regions of different layer thickness is presented in Fig. \\ref{monolayer_opt}(b), including the monolayer (ML) area marked by a dashed line and the adjacent trilayer (TL) regions. Atomically-resolved imaging has been performed on an aberration-corrected Nion UltraSTEM200 operated at 60 keV and monochromated EELS was performed on a modified Nion HERMES-S200 (also known as ChromaTEM) operated at 60 keV with the sample cooled to cryogenic temperatures (T $\\approx$ 150 K) as depicted in the schematic shown in Fig. \\ref{monolayer_opt}(a). High-angle annular dark-field (HAADF) imaging encompasses Rutherford scattering towards high angular ranges whose cross-section approximates proportionally to the atomic-number (Z) by $\\sim$Z$^{1.7}$. STEM-HAADF imaging of such freestanding WSe$_{2}$ monolayers, shown in the inset of Fig. 
\\ref{monolayer_opt}(d), demonstrates the distinguishable intensity difference between the W and Se$_{2}$ atomic columns.\n\nThe EELS loss function, Im${\\{-1/\\epsilon\\}}$, of layered materials like TMDCs depends on the in-plane component of its complex dielectric function $\\epsilon = \\epsilon_{1} + i\\epsilon_{2}$ \\cite{Tizei2015}. As thickness approaches atomically-thin towards monolayers, the loss function becomes dominated by surface effects, and therefore it approximates to Im${\\{\\epsilon\\}}$. It is thus equivalent to measuring the 2D material's absorption function ($\\alpha$), and can reflect the optical excitation response, including the optical bandgap, and interband transitions. The excitonic absorption signatures of both freestanding and \\textit{h}-BN encapsulated WSe$_{2}$ monolayers from low-loss EELS with the elastic (zero-loss) peak background subtracted demonstrates a good general correspondence to the optical absorption spectrum in Fig. \\ref{monolayer_opt}(d)\\cite{Arora2015,Schmidt2016}. With the exception of the additional broadening in the freestanding WSe$_{2}$ \\cite{Shao2022}, the four dominant peaks, labeled as A, B, C, and D, are all well-reproduced in shape and energy positions. The theoretical understanding of the physical origin of each of these resonances in WSe$_{2}$ monolayer, including the high-energy spectral features, will be discussed in Section \\ref{theory}.\n\n\\begin{figure}[tb]\n\t\\centering\n\t\\includegraphics[width=\\columnwidth]{WSe2-ML-microscope.png}\n\t\\caption{(a) Schematic of the electron microscope set-up including an electron monochromator, liquid nitrogen cooling sample holder, and a light collection system (not used in this work). (b) Optical microscopy image of a mechanically-exfoliated WSe$_{2}$ flake transferred onto a holey Si$_{3}$N$_{4}$ TEM grid. (c) Schematic illustrating how nanometric fragments of (twisted) bilayers and trilayers were likely formed during the exfoliation and transfer process. (d) Comparison of low-loss EELS spectra measured from freestanding (black) and \\textit{h}-BN encapsulated (red) WSe$_{2}$ monolayer, and optical absorption spectrum of \\textit{h}-BN encapsulated WSe$_{2}$ monolayer (blue), inset with a STEM-HAADF images of WSe$_{2}$ monolayer illustrating distinct contrast between the W and Se$_{2}$ atomic columns. \\label{monolayer_opt}}\n\\end{figure}\n\n\\begin{figure}[tb]\n\t\\centering\n\t\\includegraphics[width=\\columnwidth]{WSe2-AA-prime.pdf}\n\t\\caption{(a,b) STEM-HAADF images of WSe$_{2}$ monolayer (ML) folded along a zig-zag direction direction indicated in the inset image fast-Fourier transform in (a), resulting in 0$^{\\circ}$ bilayers with AA$^{\\prime}$ (2\\textit{H}) stacking order. (c) Atomic model for AA$^{\\prime}$ stacked WSe$_{2}$ bilayer with armchair (ac) and zig-zag (zz) directions noted in the projected view, and (d) multislice STEM-HAADF image simulation corresponding to the atomic model. (e) Intensity line profiles comparing experimental and simulated images along the selected areas marked by red and blue square brackets in (b) and (d), respectively. \\label{AA-prime_stacking}}\n\\end{figure}\n\n\\begin{figure*}[tb]\n\t\\centering\n\t\\includegraphics[width=0.9\\textwidth]{WSe2-moire-spectra-shifted.png}\n\t\\caption{STEM-HAADF images of WSe$_{2}$ bilayers with (a) 13$^{\\circ}$, (b) 5.5$^{\\circ}$, and (e) 3.4$^{\\circ}$ relative twist angle as measured from the image FFT of the larger twist angles in (c), (d), respectively. 
The moir\\'{e} unit cells are highlighted by the dotted lines in (a), (b), and (e). In the image FFT, the red marks the orientation of the underlying monolayer, and the blue marks the orientation of the additional layer. (f) Atomic structure model of a moir\\'{e} superlattice of twisted bilayer WSe$_{2}$ of 3.9$^{\\circ}$ twist angle and \\textit{R}-type stacking, with the moir\\'{e} unit cell outlined in black, the various high-symmetry stacking identified by colored circles, and bridge site (Br) labelled. (g) Monochromated EELS spectra from twisted bilayer WSe$_{2}$ with various moir\\'{e} angles compared to a representative monolayer (ML) offset in energy relative to the A exciton. The dotted lines are a guide to show the invariance and changes in the different exciton energy positions. \\label{moire-spectra}}\n\\end{figure*}\n\nIn addition to freestanding WSe$_{2}$ monolayers, sub-micron fragments of bilayers (BLs) and trilayers (TLs) with variable relative twist angle between 0--30$^{\\circ}$ were also common occurrences due to folding during the mechanical exfoliation and transfer process [\\textit{cf.} Fig. \\ref{monolayer_opt}(c) and Fig. \\ref{folding-stack}]. The nature by which bilayers were formed can help shed light on their stacking order. In the case of those formed by folding of free edges, folding of monolayers along a zig-zag direction results in a bilayer with an aligned configuration of 0$^{\\circ}$ relative twist angle and AA$^{\\prime}$ stacking order (following the nomenclature proposed in ref. \\onlinecite{He2014}), as shown in Fig. \\ref{AA-prime_stacking}(a--c). AA$^{\\prime}$ (2\\textit{H}) bilayer stacking corresponds to the most energetically favorable configuration \\cite{He2014}, typical of bilayers obtained by mechanical exfoliation from bulk crystals \\cite{Sarkar2019}. STEM-HAADF image intensity was used to deduce the stacking in the bilayers by comparison with line profiles from multislice image simulations [Fig. \\ref{AA-prime_stacking}(d,e)]. Fig. \\ref{folding-stack}(e) illustrates schematically how folding of monolayers along a zig-zag direction (top), or in between zig-zag and armchair directions (bottom) can result in bilayers of zero and non-zero twist angles, respectively. The high twist angles of the 13$^{\\circ}$ and 28$^{\\circ}$ bilayers summarized in Fig. \\ref{folding-stack} correspond to commensurate moir\\'{e} angles with some of the smallest coincidence site lattices in homobilayers \\cite{Zhao2022}, and falls under the second category when assessing the crystallography of the fold. The second-order reflection (armchair direction) circled in red in Fig. \\ref{folding-stack}(d) indicates that for twist angle of 13$^{\\circ}$, the folding normal (marked by the green arrow) lies closer towards an armchair direction, such that this would result in \\textit{R}-type (or AA) stacking in the bilayer. Other notable features include the seldom Se-vacancy in monolayers \\cite{Zheng2019}, and bands of oxide product made up of atomic clusters of tungsten from preferential oxidation at flake edges [\\textit{cf}. Fig. \\ref{AA-prime_stacking}(b), Fig. \\ref{folding-stack}(c,g)].\n\nThe non-zero relative twist angles routinely observed vary from high twist angles [Fig. \\ref{moire-spectra}(a) and Fig. \\ref{folding-stack}] that generate sub-nanometer moir\\'{e} periodicities to $\\sim$10 nm periods for few degree twists [Fig. \\ref{moire-spectra}(b,e)]. 
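For orientation, the moir\\'{e} period expected from a rigid (unreconstructed) twist follows $L_{\\mathrm{m}}\\approx a/[2\\sin(\\theta/2)]$; the minimal sketch below assumes an in-plane lattice constant of $a\\approx0.33$ nm for WSe$_{2}$ and is intended only as a rough consistency check of the length scales quoted above.
\\begin{verbatim}
# Rough estimate of the moire period of a twisted homobilayer,
# L_m = a / (2 sin(theta/2)), assuming a ~ 0.33 nm for WSe2.
import math

a = 0.33  # nm, assumed in-plane lattice constant
for theta_deg in (2.3, 3.4, 5.5, 13.0, 28.0):
    L_m = a / (2.0 * math.sin(math.radians(theta_deg) / 2.0))
    print(f"theta = {theta_deg:4.1f} deg  ->  moire period ~ {L_m:.2f} nm")
\\end{verbatim}
This reproduces the range from sub-nanometer periods at the largest angles to several-nanometre periods for few-degree twists.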
The relative twist angles were measured from the image fast Fourier-transform (FFT), including examples shown in Fig. \\ref{moire-spectra}(c,d), and confirmed using nano-beam electron diffraction, where the latter is less sensitive to effects of scan distortion in STEM imaging. Well-defined hexagonal moir\\'{e} patterns with few-nanometer periodicity are also evident in the STEM-HAADF images for the low twist angles in Fig. \\ref{moire-spectra}(b,e) and Fig. \\ref{3layer_low-angle}. The moir\\'{e} superlattice (as highlighted by a dotted line in the images) of the WSe$_{2}$ homobilayers is composed of regions of high-symmetry stacking, namely, AA, AB, and BA stacking in the case of \\textit{R}-type stacking \\cite{Gogoi2019}. As shown in the moir\\'{e} superlattice structure model in Fig. \\ref{moire-spectra}(f), it is the bridge sites (marked Br) connecting adjacent AB and BA stacked regions, which also have their own unique local alignment, that make up the hexagonal pattern outlines of the twisted bilayers observable in the STEM-HAADF images. Unlike the common AA$^{\\prime}$ stacking, these aforementioned \\textit{R}-type stacking configurations lack inversion symmetry, such that the \\textit{K} ($K^{\\prime}$)-points of the joint Brillouin zone are inequivalent and the spins of the upper and lower split bands in individual layers are instead identical at a given valley\\cite{Schneider2019}. For twisted bilayers, the energy of the indirect $KQ$ transition has been shown to depend on twist-angle and atomic registry in MoS$_{2}$ \\cite{vanderZande2014}, WS$_{2}$ \\cite{Zheng2015}, and more recently in WSe$_{2}$ \\cite{Wang2018,Scuri2020,Merkl2020}.\n\nLow-angle annular dark-field (LAADF) imaging in STEM offers more diffraction contrast sensitivity, and can be used to image the moir\\'{e} superlattice over long-range (hundreds of nm), thus particularly useful to illustrate distortions in the local periodicity over long distances \\cite{Weston2020}. At twist angles towards 5$^{\\circ}$, STEM-LAADF imaging gives a periodic pseudo-atom like contrast that corresponds to the various high-symmetry stacking points in the moir\\'{e} superlattice. Distortions in the moir\\'{e} lattice at ripples, cracks or towards edges of the twisted WSe$_{2}$, such as shown in Fig. \\ref{atomic-reconstruction}(g,i), lead to release of strain and thus reconstruction into domains (an expansion of specific high-symmetry points). The domain contrast is further manifested at the lowest twist angle of 2.3$^{\\circ}$, where arrays of triangular domains with boundaries in dark contrast are arranged in a six-fold fashion [as marked by alternating purple and turquoise triangles in Fig. \\ref{atomic-reconstruction}(c)] continuous over the entirety of the few hundred nm-sized twisted bilayer and trilayer. The domain boundary geometry differs between \\textit{R}-type (AA) and \\textit{H}-type (AA$^{\\prime}$) stacking in twisted homobilayer TMDCs \\cite{Weston2020}, taking on a triangular geometry and kagome-like pattern dominated by hexagonal regions, respectively. Both geometries are governed by atomic reconstruction where some of the high-symmetry stacking regions become more energetically favorable towards low twist angles.\n\nThe purpose of the STEM-LAADF imaging on twisted WSe$_{2}$ bilayers is effectively two-fold: firstly to identify lattice distortions and the occurrence of atomic reconstruction; and secondly, the domain boundary geometry was used to identify the stacking order in lower twist angles. 
Three of the five twisted bilayers formed by stacking presented in Fig. \\ref{atomic-reconstruction} have been identified to have \\textit{R}-type (AA) stacking. In addition, comparative STEM-HAADF images of regions with atomic reconstruction [see Fig. \\ref{atomic-reconstruction}(d) and (h)] confirms the domain boundary are the bridge sites (Br), and the triangular domains are made up of AB/BA stacking configuration as outlined by the purple triangle in Fig. \\ref{atomic-reconstruction}(d). Observation of atomic reconstruction over long-range in only the 2.3$^{\\circ}$ twisted bilayer WSe$_{2}$ validates the calculated crossover angle for \\textit{R}-type stacked bilayer TMDCs of $\\theta^{\\circ}_{3R}$ $\\sim$ 2.5$^{\\circ}$ by Enaldiev \\textit{et al}.\\cite{Enaldiev2020}, below which the bilayers transition from a rigid rotation to a lattice-reconstructed regime.\n\nThe excitonic absorption signatures of twisted WSe$_{2}$ bilayers from low-loss EELS are presented in Fig. \\ref{moire-spectra}(g) aligned in energy with respect to the A exciton alongside a spectrum from a WSe$_{2}$ monolayer for comparison. The four excitonic resonances are also prominently reproduced in the case of twisted bilayers. The energy separation between A and B exciton listed in Table \\ref{exciton-energies}, which is linked to the spin-orbit coupling, remains relatively constant with the number of layers, as well as twist angle. Small shifts in the A exciton, coupled with simultaneous rigid shifts of the B exciton are visible in the unadjusted spectra in Fig. \\ref{images_spectra}(c) and they can be attributed to local strain \\cite{Schmidt2016} or unintentional doping. It is worth noting the small peaks $\\sim$300 meV below the A exciton [Fig. \\ref{moire-spectra}(g) or at 1.3--1.4 eV as presented in Fig. \\ref{images_spectra}(c)] is likely of the same origin as the so-called subgap exciton peak measured using momentum (q)-resolved EELS at non-zero q in various TMDCs including WSe$_{2}$ \\cite{Hong2020}.\n\n\\begin{table*}\n \\sisetup{round-mode=places,round-precision=3}\n \\centering\n \\caption[]{Fitting of the A, B, C, D excitonic peak energy positions (X$_{A}$, X$_{B}$, X$_{C}$, X$_{D}$) obtained from the EELS spectra in Fig. \\ref{images_spectra}(c), and the relative energy difference between A and B exciton (A--B) governed by valence band splitting at $K$-point, B and C exciton (B--C), and A and C exciton (A--C). 
Twist angles marked by asterisks (*) are relative twist angles because its stacking order remains undetermined and can also be 60 -- $\\theta$.}\n \\label{exciton-energies}\n \\small\n \\begin{ruledtabular}\n \\begin{tabular}{cccccccc}\n Twist angle ($^{\\circ}$) & X$_{A}$ (eV) & X$_{B}$ (eV) & X$_{C}$ (eV) & X$_{D}$ (eV) & $\\Delta_{A-B}$ (eV) & $\\Delta_{B-C}$ (eV) & $\\Delta_{A-C}$ (eV) \\\\\n \\hline\n\tMonolayer & \\num{1.69665} & \\num{2.14549} & \\num{2.59033} & \\num{3.02347} & 0.449 & 0.445 & 0.894\\\\\n\t0 & \\num{1.75122} & \\num{2.24806} & \\num{2.38763} & \\num{2.95003} & 0.497 & 0.140 & 0.636\\\\\n\t2.3 & \\num{1.75306} & \\num{2.22515} & \\num{2.41646} & \\num{2.95970} & 0.472 & 0.191 & 0.663\\\\\n\t3.4 & \\num{1.71144} & \\num{2.16072} & \\num{2.42001} & \\num{2.93420} & 0.449\t & 0.259 & 0.709\\\\\n\t4.1* & \\num{1.74908} & \\num{2.19568} & \\num{2.44703} & \\num{2.97394} & 0.447 & 0.251 & 0.698\\\\\n\t5.5 & \\num{1.71309} & \\num{2.15818} & \\num{2.48009} & \\num{2.97057} & 0.445 & 0.322 & 0.767\\\\\n\t7.2* & \\num{1.76281} & \\num{2.22385} & \\num{2.48613} & \\num{2.95203} & 0.461 & 0.262 & 0.723\\\\\n\t13 & \\num{1.69722} & \\num{2.14272} & \\num{2.47160} & \\num{2.93910} & 0.446 & 0.329 & 0.774\\\\\n\t28 & \\num{1.74317} & \\num{2.18494} & \\num{2.51673} & \\num{2.98775} & 0.442 & 0.332 & 0.774\\\\\n \\end{tabular}\n \\end{ruledtabular}\n\\end{table*}\n\nComparing different twist angles in the bilayers also show sizable shifts in the C exciton energy up to 200 meV, which subsequently alter drastically the overall shape of the spectrum at the B--C transitions, with extremes between the aligned (0$^{\\circ}$ and 60$^{\\circ}$) and anti-aligned (towards 30$^{\\circ}$) cases suggesting underlying differences in interlayer coupling. The exciton peak shifts are quantitatively determined by peak fitting to the second-derivative treated with Savitzky-Golay filtering using multiple Gaussians, four in total, each corresponding to a structure in each EELS spectrum. The results of the peak fitting are summarized in Table \\ref{exciton-energies} and displayed graphically in Fig. \\ref{images_spectra}(e). Consistent with optical absorption \\cite{Arora2015,Zhao2013}, the layer thickness dependence of A, B, and C exciton resonances of few-layered WSe$_{2}$ measured using monochromated EELS shows a pronounced decrease in the C and D exciton energies between the monolayer and 0$^{\\circ}$ bilayer with AA$^{\\prime}$ stacking in Fig. \\ref{moire-spectra}(c). \n\nFurthermore, the C exciton energy continues to show the same decreasing trend with the number of layers when comparing bilayers and trilayers of the same twist angle [Fig. \\ref{3layer_low-angle}(b)]. The more pronounced shifts of the C and D excitonic peaks relative to the A and B excitons with layer thickness suggests an association to the localization of the electronic states involved in the respective transitions, in particular the orbital character of the chalcogen atoms (Se $p$-orbitals in this case) that contribute most to the interlayer coupling \\cite{Zhang2015orbitals}. 
Specifically, the valence band maximum (VBM) at $K$-point exhibits in-plane $p_{x,y}$ character, while the VBM and conduction band minimum (CBM) towards $\\Gamma$-point displays mainly out-of-plane $p_{z}$ character \\cite{Voss1999} and thus most strongly affected by interlayer separation in few-layered WSe$_{2}$; the $p$-orbital contribution shows a mixture of $p_{x,y}$ and $p_{y}$ character at $Q$-point [see the orbital-projected band structure for AA$^{\\prime}$ WSe$_2$ in Fig. \\ref{orbitals}].\nThis corroborates well with the expected changes in the band structure between monolayer towards bulk WSe$_{2}$, namely the appearance of an indirect gap $K$-$Q$ transition due to the downshift of the $Q$-valley overtaking the $K$-point as CBM beyond a monolayer. For the twisted bilayer WSe$_{2}$ with increasing relative twist angle, the C exciton energy blueshifts by $\\sim$200 meV [\\textit{cf.} Table \\ref{exciton-energies} and Fig. \\ref{images_spectra}(e)], indicative of an upshift in the CBM at $Q$-valley. Recent studies on twisted bilayer WSe$_{2}$ reported similar blueshifting in the indirect $KQ$ interlayer exciton emission energy as a function of twist angle, reaching a maximum at $\\theta$ = 30$^{\\circ}$, while relatively minimal change in the direct $KK$ intralayer exciton (X$_{A}$) in comparison \\cite{Scuri2020,Merkl2020}. Therefore the phonon-assisted indirect $KQ$ exciton energy directly reflects the interlayer electronic coupling strength, which is strongest at 0$^{\\circ}$ and 60$^{\\circ}$ \\cite{Liu2014}. Raman spectroscopy is typically used as an indicator of the mechanical (i.e. vibrational) interlayer coupling \\cite{Castellanos-Gomez2014,Liu2014,Sarkar2019}.\nIn a similar manner, energy shifts of the C exciton energy can also gauge the electronic interlayer coupling effects from absorption-based techniques such as EELS, indicating a reduction in its strength with moir\\'{e} angle towards 30$^{\\circ}$ in twisted WSe$_{2}$ bilayers.\n\n\\section{Discussion}\n\\label{theory}\n\nFundamental insights on the excitonic response of TMDCs can be successfully acquired using the \\textit{GW}+BSE (Bethe-Salpeter equation) method on top of DFT calculations \\cite{Komsa2012,Qiu2016,Marsili2021}. In the case of W-based TMDC monolayers, the lowest energy exciton A is mainly composed of transitions near the $K$ point from the VBM to the second unoccupied state in the conduction band (CBM+1) which has the same spin-character \\cite{Hong2021,Gillen2021,Marsili2021}. The B peak has a more complex character but it is mostly formed by transitions near $K$ from VBM$-$1 to CBM. As discussed in the literature \\cite{Hong2021,Gillen2021}, higher energy spectral features cannot be linked to individual excitons but arise from an ensemble of excitonic transitions very close in energy. The complex TMDCs excitonic spectra is therefore usually broadened to reproduce the same number of peaks seen in experiments. However these structures may not present a homogeneous excitonic character and result from a superposition of excitons belonging to different orders of distinct Rydberg series. This situation has led to an uneven nomenclature \\cite{Zhao2013,Kozawa2014,Gillen2017IEEE,Hong2021,Gillen2021}, and particular attention should be paid when comparing different references.\n\nThe character of the excitonic transitions in the AA$^{\\prime}$ bilayer cannot be deduced \\textit{a priori} from the spectroscopic response of the monolayer. 
Therefore, the AA$^{\\prime}$ bilayer is treated explicitly, along with the monolayer case, via a non-collinear \\textit{GW}+BSE approach (computational details can be found in the SI). This method has been shown to provide good agreement with experiments for the energy separation of the A and B excitons compared to a perturbative treatment of spin-orbit coupling \\cite{Marsili2021}. The imaginary part of the dielectric function $\\epsilon_2$ for the monolayer and AA$^\\prime$ bilayer \\ce{WSe2} together with the oscillator strengths of the main excitonic transitions are shown in Fig. \\ref{spectra}(a) and (b). For the sake of comparison with experiments, four energy windows centered at local maxima of $\\epsilon_2$ have been defined for the mono- and bilayer spectra, respectively, which can be linked to the experimental peaks A--D [shaded regions in Fig. \\ref{spectra}(a,b)]. Fig. \\ref{spectra}(c,d) presents the weight in the reciprocal space of the transitions contributing to each of these peaks: for each exciton $\\lambda$ in a given energy window, the weights $\\sum_{vc}A^{cvk}_\\lambda$ of the electron-hole pairs of wave vector $k$ are considered, and all these contributions are summed up taking into account the oscillator strength of each individual exciton. \n\n\\begin{figure}\n\t\\includegraphics[width=\\columnwidth]{BSE-spectra.pdf}\n\n\t\\caption{Imaginary part of the dielectric function calculated using a GW+BSE approach for (a) monolayer and (b) AA$^\\prime$ bilayer \\ce{WSe2} together with the oscillator strengths of the main excitonic transitions. The shaded regions correspond to the main peaks identified in experiments. (c,d) Weight in reciprocal space of the transitions contributing to each of these peaks.}%\n\t\\label{spectra}\n\\end{figure}\n\nPeaks A and B are formed by transitions near $K$ and both features undergo a blueshift in the bilayer due to an increase in the direct band gap at $K$ compared to that of the monolayer. Peak C has a more complex structure where \\textit{k}-points next to both $K$ and $Q$ contribute. While the points next to $K$ have a higher spectral weight, the $Q$ points are three times more numerous and thus the integrated contribution of the two regions of the reciprocal space is comparable. Peak C had been previously described as a higher-order exciton of the same series as peak B \\cite{Hong2021}; Fig. \\ref{spectra}(c,d) illustrates that additional excitonic transitions contribute to this spectral feature. Finally, peak D is dominated by transitions from the last occupied to the first unoccupied band near the $Q$ point. The appearance of these strong high-energy excitonic transitions had been linked to the high joint density of states that arises from band nesting effects in TMDCs \\cite{Carvalho2013,Bieniek2018,Mennel2020}.\n\nAs discussed in the experimental results, blueshifts of the excitonic peak C in bilayer TMDCs were observed as a function of their twist angle by means of EELS. It can be reasonably argued that the decomposition in the reciprocal space of these spectral features might be the same for aligned and twisted bilayers. Therefore, while the \\textit{GW}+BSE approach becomes too computationally expensive when applied to extended moir\\'{e} supercells, it might be feasible to link trends observed in the spectra to continuous changes of the DFT electronic structure.
The band structure of a moir\\'{e} supercell is highly folded and therefore it can be hardly compared to those of a reference untwisted bilayer or monolayer without the use of unfolding methods which provide a primitive cell effective band structure \\cite{Ku2010,Lee2013unfolding}. These techniques have been employed already for the unfolding of the bands of various twisted 2D heterostructures \\cite{Nishi2017,Matsushita2017,Sanchez-Ochoa2020,Magorrian2022band}. Unfolding routines require the definition of a reference primitive basis; in the case of moir\\'{e} structures, unfolding has to be performed twice to take into account the intrinsic periodicity of each of the two layers separately. The unfolded bands can then be projected independently on the two layers used as reference.\n\nAs an example, in Fig. \\ref{unfolded-example}(a) the unfolding method has been applied to a moir\\'{e} supercell with a twist angle of 21.8$^\\circ$ (computational details are provided in the SI). The purple and blue bands in Fig. \\ref{unfolded-example}(c) were unfolded using the primitive cell of the bottom and top layer as a reference [following their color-coded cells in Fig. \\ref{unfolded-example}(b)], respectively, and were subsequently projected onto the same layer. The $\\Gamma$--$K$--$M$ path connects high symmetry points of the bottom layer but not of the top layer. The unfolded occupied purple bands closely follow those of the untwisted AA$^\\prime$ bilayer (white dashed lines), although an upper shift is observed near the $\\Gamma$ point. This region of reciprocal space is where the occupied bands of the monolayer (yellow dashed line) and bilayer differ and is very sensitive to the interlayer spacing which, in twisted TMDCs, can vary with the twist angle and the layer registry \\cite{vanderZande2014,Yan2019}. Finally, a few small minigaps ($< 50$ meV) open at the crossing of the bands of the two layers if they present the same spin character. More relevant variations are observed in the conduction bands. With respect to the AA$^\\prime$ bilayer, the first two unoccupied bands of the monolayer cross in the $K$--$Q$ path and are higher in energy at $M$. The unfolded purple bands show the same characteristics and can therefore be reasonably interpreted as the bands of the monolayer perturbed by the adjacent twisted layer.\n\n\\begin{figure*}[!htb]\n\t\\includegraphics[width=0.95\\textwidth]{Unfolded-bands.png}\n\t\\caption{(a) Structure of a WSe$_{2}$ moir\\'{e} supercell with a twist angle of 21.8$^\\circ$. Blue and purple arrows indicate the primitive unit cell of the top and bottom layer, respectively. (b) Reciprocal lattice vectors and Brillouin zones of the primitive cells of the individual layers, and the supercell Brillouin zones (grey hexagons). (c) DFT band structure of the moir\\'{e} cell unfolded along the high symmetry directions of the irreducible Brillouin zone shaded in (b). Blue and purple lines correspond to the unfolding using as reference primitive cell those of the top and bottom layer, respectively. The yellow and white dashed lines are the band structure of WSe$_{2}$ monolayer and untwisted bilayer, respectively. (d) Unfolded band structures of WSe$_{2}$ moir\\'{e} supercells with different twist angles projected over the reference layer. (e) Direct and indirect band gap as a function of the twist angle. Gray dashed line indicates the direct gap of the WSe$_{2}$ monolayer.}\n\t\\label{unfolded-example}\n\\end{figure*}\n\nIn Fig. 
\\ref{unfolded-example}(d) the unfolded bands of twisted \\ce{WSe2} bilayers are presented as a function of the twist angle. At the $K$ point, both the direct band gap and the spin-orbit splitting do not change with the twist angle and remain equal to the values of the untwisted bilayer [blue dots in Fig. \\ref{unfolded-example}(e)]. This behavior can be explained by the reduced interlayer orbital coupling for the band-edge states at the $K$ point \\cite{Kang2016unified}. Reasonably assuming that layer twists will only have a minor effect on screening, invariance of low-energy excitonic features can be deduced, as observed for the A and B peaks in the EELS spectra in Fig. \\ref{moire-spectra}(g).\n\nWhile the valence band along the $K$--$Q$ path remains invariant with the twist angle, the bottom of the CBM near the $Q$ point upshifts. The values of the indirect gap $KQ$ as a function of the twist angle were extracted and plotted in Fig. \\ref{unfolded-example}(e). The indirect gap reaches its maximum for twist angles close to 30$^\\circ$ and progressively decreases by several tens of meV towards 0$^\\circ$ and 60$^\\circ$. This behavior reproduces well the trend observed in the indirect gap measured by photoluminescence \\cite{Merkl2020} where $KQ$ indirect excitons can be activated by phonons. The C and D excitons observed in EELS, optical absorption or reflectivity involve dipolar transitions near the $Q$-point. Since the upper valence band in this reciprocal space region is not affected by the twist, the energy difference of the vertical transitions follows the same trend as the indirect gap, but shifted to higher energies. While the excitonic response of twisted \\ce{WSe2} has not been explicitly calculated here, the analysis of the unfolded bands combined with the study of the excitonic character of spectral features from the mono- and bilayer permits an explanation of the experimental trends observed in EELS as a function of twist angle.\nIt should be noted that this interpretative scheme may not be valid when considering very low (near-zero) twist angles, where the moir\\'{e} structure undergoes extensive structural relaxations and where excitonic states may re-hybridise \\cite{Brem2020}, giving rise to complex spectral features that cannot be simply linked to the spectral response of the perfect mono- or bilayer.\n\n\\section{Conclusions}\nThe evolution of the excitonic response in twisted bilayer WSe$_{2}$ as a function of moir\\'{e} angle has been investigated using monochromated STEM-EELS under cryogenic conditions, highlighting a progressive blueshift of the high-energy C excitonic peaks relative to the AA$^{\\prime}$-stacked bilayer. Atomically-resolved imaging was used to provide relevant structural information on the twisted bilayers, including the twist angle and stacking order, in addition to revealing the occurrence of atomic reconstruction at the lowest observed twist angle of 2.3$^{\\circ}$. In combination with first-principles calculations based on the \\textit{GW}+BSE approach, the physical origin of the high-energy spectral features in monolayer and AA$^{\\prime}$ bilayer WSe$_{2}$ was examined. Moreover, the unfolded DFT electronic structure of twisted bilayers showed an uplifting of the $Q$-valley CBM with respect to the untwisted AA$^{\\prime}$ bilayer.
The trends in band structure changes with moir\\'{e} angle were then linked to the BSE calculated dielectric response of the untwisted bilayer, giving good agreement to the dipolar transitions near $Q$-point contributing to the high-energy C exciton observed in EELS from the current work as well as the phonon-assisted indirect $K$--$Q$ transition measured by photoluminescence by other groups. Therefore tuning of the C exciton transitions as measured by absorption spectroscopy like EELS is an effective indicator of the electronic interlayer coupling.\n\nWith capabilities to collect photons generated by cathodoluminescence within the electron microscope utilised in this study, the addition of \\textit{h}-BN encapsulation can bring further insight to interlayer interactions in such twisted bilayers, in particular at the lengthscales of the moir\\'{e} periodicity. The expected reduction in EELS absorption linewidths will aid in the identification of small spectral variations; in conjunction, sufficient excitation of electron-hole pairs in the \\textit{h}-BN for recombination in the TMDC opens the possibility for concurrent correlation to the indirect exciton emission in the twisted bilayers.\n\n\\acknowledgments{\nThe authors acknowledge funding from the ANR, program of future investment TEMPOS-CHROMATEM (No. ANR-10-EQPX-50). This work was supported by the European Union in the Horizon 2020 Framework Program (H2020-EU) under Grant Agreement No. 823717 (ESTEEM3) and 101017720 (eBEAM). S.Y.W. acknowledges NSERC for the postdoctoral fellowship funding. A.A. acknowledges financial support from the German Research Foundation (DFG Project Nos. AR 1128/1-1 and AR 1128/1-2) and and NM-ICPS of the DST, Government of India through the I-HUB Quantum Technology Foundation (Pune, India). M.P. acknowledges CINECA for CPU time granted within ISCRA-B and ISCRA-C initiatives. This research was sponsored [in part] by the NATO Science for Peace and Security Programme under grant G5936.\n}\n\n\\bibstyle{apsrev4-1}\n"} +{"id": "red-arxiv-16", "source_id": "red-arxiv_16_red-arxiv-16", "type": "paper", "source_dataset": "red-arxiv", "title": "", "meta_data": "", "text": "\\section{Introduction and main result}\\label{sec:Intro}\r\n\r\nAs the variance quantifies the fluctuations of a random variable around its mean, upper bounds for variances are an important topic of probability theory. A main motivation to study lower bounds comes from the problem to establish central limit theorems. Here, after applying quantitative bounds for the normal approximation to standardised random variables, one has to divide by powers of the variance, whence it is essential to have lower bounds for the variance. In this paper, we derive such lower bounds for random variables that only depend on an underlying Poisson process. These so-called Poisson functionals play a crucial role in stochastic geometry but also appear in other branches of probability theory.\r\n\r\n\r\n\t\tLet $\\eta$ be a Poisson process on a measurable space $(\\mathbb{X},\\mathcal{X})$ with a $\\sigma$-finite intensity measure $\\lambda$. The underlying probability space is denoted by $(\\Omega,\\mathscr{F},\\mathbb{P})$. Let $\\mathbf{N}$ denote the set of all $\\sigma$-finite counting measures equipped with the $\\sigma$-field generated by the mappings $\\nu\\mapsto\\nu(B)$ for $B\\in\\mathcal{X}$. The Poisson process can be seen as a random element in $\\mathbf{N}$. A detailed introduction to Poisson processes can be found in e.g.\\ \\cite{LP17}. 
A Poisson functional $F$ is a real-valued measurable function on $\\Omega$ that can be written as $F=f(\\eta)$, where $f$ is a real-valued measurable function on $\\mathbf{N}$ and is called representative. For simplicity and by a slight abuse of notation, we denote a Poisson functional in the following by $F=F(\\eta)$. If $F$ is square-integrable, we write $F\\in L^2_\\eta$.\r\n\r\nThroughout this paper we are mostly interested in the asymptotic behaviour of Poisson functionals in two frameworks, namely increasing intensity or increasing observation window. More precisely, we study for $s\\to\\infty$ a family of Poisson functionals $F_s$, $s\\geq1$, where $F_s$ is either a Poisson functional on a homogeneous Poisson process with intensity $s$ or a functional that considers only points of a fixed Poisson process in an observation window that extends to the full space for $s\\to\\infty$.\r\n\r\nCentral limit theorems for some Poisson functionals were established, for example, in \\cite{AB93, BX06,BY05,CSY13,L19,LPY20,LSY19,LPS16,P05,PW08,PY01,R05,SY21}. Since the proofs require lower variance bounds as discussed above, these papers also study the asymptotic behaviour of the variance. Often convergence of the variance to a non-degenerate (i.e.\\ non-zero) asymptotic variance constant is shown. Investigating the behaviour of the variance usually requires a lot of effort. This is the reason why we want to treat the problem of lower variance bounds as a separate issue from establishing central limit theorems in this paper. To this end, we provide a lower variance bound, which can be seen as the counterpart to the Poincar\\'e inequality.\r\n\r\nAs mentioned above, a common problem is to show that the asymptotic variance constant is positive. But even if one has an explicit representation for the latter, it can be hard to show positivity because positive and negative terms could cancel out. Therefore, proving the non-degeneracy of the asymptotic variance can be a different problem than computing the limiting constant of the variance. In this case, it can be helpful to employ lower bounds for variances to deduce positivity of the asymptotic variance constant.\r\n\r\nSince the covariance matrix $\\Sigma_s\\in\\mathbb{R}^{m\\times m}$ of Poisson functionals $F^{(1)}_s,\\hdots, F^{(m)}_s$, $s\\geq 1$, satisfies\r\n$$\r\n\\Var\\bigg[ \\sum_{i=1}^m \\alpha_i F_s^{(i)} \\bigg] = \\alpha^T \\Sigma_s \\alpha\r\n$$\r\nfor all $\\alpha=(\\alpha_1,\\hdots,\\alpha_m)\\in\\mathbb{R}^m$, one can use lower bounds for variances to establish positive definiteness of the asymptotic covariance matrix $\\Sigma=\\lim_{s\\to\\infty} \\Sigma_s$ if it exists. Knowing the positive definiteness of $\\Sigma$ is of interest since it ensures that none of the Poisson functionals can be written asymptotically as a linear combination of the others. 
Furthermore, some bounds for the quantitative multivariate normal approximation (see e.g.\\ \\cite{SY21}) require the positive definiteness of the covariance matrix of the limiting normal distribution.\r\n\t\t\r\n\t\t\r\nIn order to present our main result, we need some notation and some further background on Poisson functionals.\tFor $x\\in\\mathbb{X}$ the difference operator of a Poisson functional $F=F(\\eta)$ is defined by\r\n\t\\begin{align*}\r\n\t\tD_xF=F(\\eta+\\delta_x)-F(\\eta),\r\n\t\\end{align*}\r\n\twhere $\\delta_x$ denotes the Dirac measure concentrated at $x$.\tIn general, the $n$-th iterated difference operator $D^n$ is recursively defined by\r\n\t\\begin{align*}\r\n\tD^n_{x_1,\\hdots,x_n}F=D_{x_1}(D^{n-1}_{x_2,\\hdots,x_{n}}F)\r\n\t\\end{align*} \r\nfor $n>1$ and $x_1,\\hdots,x_n\\in\\mathbb{X}$. In particular, for $x,y\\in\\mathbb{X}$ the iterated, second-order difference operator equals\r\n\t\\begin{align*}\r\n\t\tD_{x,y}^2F=D_x(D_yF)=F(\\eta+\\delta_x+\\delta_y)-F(\\eta+\\delta_x)-F(\\eta+\\delta_y)+F(\\eta).\r\n\t\\end{align*}\r\n\tFor $F\\in L_\\eta^2$ define $f_n(x_1,\\hdots,x_n)=\\frac{1}{n!}\\E[D^n_{x_1,\\hdots,x_n}F]$ for $x_1,\\hdots,x_n\\in\\mathbb{X}$ and $n\\in\\mathbb{N}$. Then, $f_n$ is symmetric and square-integrable for all $n\\in\\mathbb{N}$ and the Fock space representation of $F$ is given by\r\n\t\\begin{align}\r\n\t\t\\label{eq:fock_space}\r\n\t\t\\E[F^2]=\\E[F]^2+\\sum_{n=1}^{\\infty}n!\\lVert f_n\\rVert_n^2,\r\n\t\\end{align}\r\n\twhere $\\lVert \\cdot\\rVert_n$ denotes the norm on $L^2(\\lambda^n)$ (see, for example, \\cite[Theorem 1.1]{LP11} or \\cite[Theorem 18.6]{LP17}). Using this representation, one can directly derive\r\n\t\\begin{align}\\label{eqn:Bound_First_Chaos}\r\n\t\t\\Var[F]=\\sum_{n=1}^{\\infty}n!\\lVert f_n\\rVert_n^2\\geq \\lVert f_1\\rVert_1^2=\\int(\\E[D_xF])^2\\;\\mathrm{d}\\lambda(x).\r\n\t\\end{align}\r\n\tThe problem with this lower variance bound is that the difference operator can in general be positive or negative and, thus, can have expectation zero. To overcome this issue, we provide in this paper a counterpart to the well-known Poincar\\'{e} inequality \r\n\t\\begin{align}\\label{eqn:Poincare}\r\n\t\t\\Var[F]\\leq \\int\\E[(D_xF)^2]\\;\\mathrm{d}\\lambda(x)\r\n\t\\end{align}\r\n\tfor $F\\in L^2_\\eta$ (see, for example, \\cite[Theorem 18.7]{LP17}). In the following main result we give a condition under which the variance of $F$ can be bounded from below by a constant times the right-hand side of the Poincar\\'{e} inequality, whence we can think of it as a reversed Poincar\\'e inequality. \r\n\t\r\n\t\\begin{theorem}\r\n\t\t\\label{thm:varbound}\r\n\t\tLet $F\\in L_\\eta^2$ be a Poisson functional satisfying\r\n\t\t\\begin{equation}\r\n\t\t\t\\label{condition}\r\n\t\t\t\\mathbb{E}\\left[\\int (D_{x,y}^2F)^2\\; \\mathrm{d}\\lambda^2(x,y)\\right]\\leq\\alpha \\mathbb{E}\\left[\\int (D_{x}F)^2 \\;\\mathrm{d}\\lambda(x)\\right]<\\infty\r\n\t\t\\end{equation}\r\n\t\tfor some constant $\\alpha\\geq 0$.\r\n\t\tThen\r\n\t\t\\begin{equation}\r\n\t\t\t\\label{prop}\r\n\t\t\t\\mathrm{Var}[F]\\geq\\frac{4}{(\\alpha+2)^2}\\mathbb{E}\\left[\\int (D_{x}F)^2 \\;\\mathrm{d}\\lambda(x)\\right].\r\n\t\t\\end{equation}\r\n\t\\end{theorem}\r\n\r\n\r\nThe inequality \\eqref{prop} provides a non-trivial lower bound for the variance as soon as one can show that the difference operator is non-zero with positive probability. 
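As a simple sanity check of Theorem \\ref{thm:varbound}, consider $F=\\eta(A)$ for a fixed set $A\\in\\mathcal{X}$ with $\\lambda(A)<\\infty$. Then $D_xF=\\mathbbm{1}\\{x\\in A\\}$ and $D^2_{x,y}F=0$, so that \\eqref{condition} is satisfied with $\\alpha=0$ and \\eqref{prop} yields
\\begin{align*}
\\mathrm{Var}[F]\\geq \\mathbb{E}\\left[\\int (D_xF)^2\\;\\mathrm{d}\\lambda(x)\\right]=\\lambda(A),
\\end{align*}
which is sharp since $\\mathrm{Var}[\\eta(A)]=\\lambda(A)$; the Poincar\\'{e} inequality \\eqref{eqn:Poincare} provides the matching upper bound in this case.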
To this end, one can construct special point configurations that lead to a non-zero difference operator and occur with positive probability. This is often much easier than to verify that the expectation of the difference operator is non-zero as required in \\eqref{eqn:Bound_First_Chaos}.\r\n\r\nLet us discuss some alternative approaches to derive lower variance bounds for Poisson functionals or statistics arising in stochastic geometry. In \\cite[Theorem 5.2]{LPS16}, a general lower bound for variances of Poisson functionals is established, where, for fixed $k\\in\\mathbb{N}$ and $I_1,I_2\\subseteq\\{1,\\hdots,k\\}$, one has to bound\r\n$$\r\n\\bigg|\\mathbb{E}\\bigg[ f\\big(\\eta+\\sum_{i\\in I_1} \\delta_{x_i}\\big) - f\\big(\\eta+\\sum_{i\\in I_2} \\delta_{x_i}\\big) \\bigg] \\bigg|\r\n$$\r\nfrom below for $x_1,\\hdots,x_k\\in\\mathbb{X}$. Since here more than one point can be added, which allows to enforce particular point configurations, this expression is often easier to control than the expectation of the first difference operator in \\eqref{eqn:Bound_First_Chaos}. But one still has the problem that the difference within the expectation can be both positive and negative.\r\n\r\nIn \\cite{BY05,P05,PW08,PY01}, lower bounds for variances of so-called stabilising functionals of Poisson processes and sometimes also binomial point processes were deduced. These results have all in common that generalised difference or add-one-cost operators are required to be non-degenerate. This is similar to our work, but the random variable that has to be non-degenerate is more involved than the difference operator and, moreover, the results apply only to stabilising functionals and not to general Poisson functionals.\r\n\r\nA further approach is to condition on some $\\sigma$-field and to bound the variance from below by the expectation of the conditional variance with respect to this $\\sigma$-field. In the context of stochastic geometry this was used, for example, in \\cite{AB93} or \\cite{BFV10, R05}. By conditioning on the $\\sigma$-field it is sufficient to consider some particular point configurations similarly as in our Theorem \\ref{thm:varbound}. In the recent preprint \\cite{CX20}, a condition requiring that some conditional expectations are not degenerate is used to establish lower variance bounds for stabilising functionals.\r\n\r\nIn order to demonstrate how Theorem \\ref{thm:varbound} can be applied, we derive lower variance bounds for specific examples from stochastic geometry:\r\n\r\n\\textbf{Spatial random graphs.} We consider degree and component counts of random geometric graphs and edge length functionals and degree counts of $k$-nearest neighbour graphs. By proving lower bounds for variances of linear combinations of such statistics, we show the positive definiteness of asymptotic covariance matrices. Combining these findings with the results from \\cite[Section 3]{SY21} provides quantitative multivariate central limit theorems for the corresponding random vectors.\r\n\r\n\\textbf{Random polytopes.} By taking the convex hull of the points of a homogeneous Poisson process in the $d$-dimensional unit ball, one obtains a random polytope. We study the $L^p$ surface area, which generalises volume and surface area. For two different $L^p$ surface areas we show positive definiteness of the asymptotic covariance matrix and, as a consequence, a result for the multivariate normal approximation. 
In particular, this allows to study the joint behaviour of volume and surface area of the random polytope.\r\n\r\n\\textbf{Poisson shot noise processes.} We provide a lower variance bound for the volume of excursion sets of a Poisson shot noise process. In comparison to the works \\cite{BST12}, \\cite{L19} or \\cite{LPY20} we modify the assumptions on the kernel function of the Poisson shot noise process.\r\n\r\nThe considered statistics of spatial random graphs fit into the framework of stabilising functionals of Poisson processes, whence the results for the non-degeneracy of the asymptotic variance of stabilising functionals discussed above might be applicable. The $L^p$ surface area is still stabilising, but here the variance does not scale like the intensity of the underlying Poisson process, whence the previously mentioned results are not available any more. Finally, in case of general Poisson shot noise processes we do not have stabilisation at all. In order to apply Theorem \\ref{thm:varbound}, one has to bound the left-hand side of \\eqref{condition} from above. In case of the spatial random graphs and the random polytope, this can be done easily by employing results from \\cite{LSY19} due to stabilisation. \r\n\r\nThis paper is organised as follows. Our main result Theorem \\ref{thm:varbound} is proven in Section \\ref{sec:proof_main_result}. The following three sections are devoted to applications, statistics of spatial random graphs in Section \\ref{sec:spatial_geometric_graphs}, the $L^p$ surface area of random polytopes in Section \\ref{sec:random_polytopes} and the excursion sets of a Poisson shot noise processes in Section \\ref{sec:excursion_sets}. Finally, we recall some facts about stabilising functionals in the appendix.\t\r\n\t\t\r\n\t\r\n\r\n\r\n\r\n\r\n\t\\section{Proof of Theorem \\ref{thm:varbound}} \\label{sec:proof_main_result}\r\n\tThe proof of Theorem \\ref{thm:varbound} relies upon using the Fock space representations of $F$ and its first two difference operators.\r\n\t\r\n\t\t\\begin{proof}[Proof of Theorem \\ref{thm:varbound}]\r\n\t\t\tFor $n\\in\\mathbb{N}$ let $f_n$ denote the kernels of the Fock space representation of $F$. Recall that \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{Var}[F]&=\\sum_{n=1}^\\infty n!\\lVert f_n\\rVert_n^2.\r\n\t\t\t\\end{align*}\r\n\t\t\t\r\n\t\t\tFirst we assume $\\alpha>0$. Then we know by assumption \\eqref{condition} that $F,D_xF,D_{x,y}^2F\\in L^2_\\eta$ for $\\lambda$-a.e.\\ $x,y\\in\\mathbb{X}$. 
Using Fubini's theorem, the monotone convergence theorem and applying the Fock space representation \\eqref{eq:fock_space} to the first and second order difference operator provides\r\n\t\t\t\\allowdisplaybreaks\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathbb{E}\\left[\\int (D_{x}F)^2 \\;\\mathrm{d}\\lambda(x)\\right]&=\\int \\sum_{n=0}^{\\infty}\\frac{1}{n!}\\int\\E[D_{x_1,\\dots,x_n}^n(D_xF)]^2\\;\\mathrm{d}\\lambda^n(x_1,\\dots,x_n)\\;\\mathrm{d}\\lambda(x)\\\\\r\n\t\t\t\t&= \\sum_{n=0}^{\\infty}\\frac{1}{n!}\\int\\E[D_{x_1,\\dots,x_n,x_{n+1}}^{n+1}F]^2\\;\\mathrm{d}\\lambda^{n+1}(x_1,\\dots,x_n,x_{n+1})\\\\\r\n\t\t\t\t&=\\sum_{n=1}^\\infty \\frac{n}{n!}\\int\\E[D_{x_1,\\dots,x_n}^{n}F]^2\\;\\mathrm{d}\\lambda^n(x_1,\\dots,x_n)\\\\\r\n\t\t\t\t&=\\sum_{n=1}^{\\infty}nn!\\lVert f_n\\rVert_n^2\\\\\r\n\t\t\t\t\\text{and, similarly, }\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;&\\\\\r\n\t\t\t\t\\mathbb{E}\\left[\\int (D_{x,y}^2F)^2\\; \\mathrm{d}\\lambda^2(x,y)\\right]&=\\int \\sum_{n=0}^\\infty\\frac{1}{n!}\\int\\E[D_{x_1,\\dots,x_n}^n(D_{x,y}F)]^2\\;\\mathrm{d}\\lambda^{n}(x_1,\\dots,x_{n})\\;\\mathrm{d}\\lambda^2(x,y)\\\\\r\n\t\t\t\t&= \\sum_{n=0}^{\\infty}\\frac{1}{n!}\\int\\E[D_{x_1,\\dots,x_{n+2}}^{n+2}F]^2\\;\\mathrm{d}\\lambda^{n+2}(x_1,\\dots,x_{n+2})\\\\\r\n\t\t\t\t&=\\sum_{n=2}^\\infty \\frac{n(n-1)}{n!}\\int\\E[D_{x_1,\\dots,x_n}^{n}F]^2\\;\\mathrm{d}\\lambda^n(x_1,\\dots,x_n)\\\\\r\n\t\t\t\t&=\\sum_{n=1}^{\\infty}n(n-1)n!\\lVert f_n\\rVert_n^2.\r\n\t\t\t\\end{align*}\r\n\t\t\tTherefore, assumption \\eqref{condition} means that $\\sum_{n=1}^{\\infty}n!n\\lVert f_n\\rVert_n^2(\\alpha-n+1)\\geq 0$. Additionally, $\\left(n-\\frac{(\\alpha +2)}{2}\\right)^2\\geq 0$ implies $\\frac{(\\alpha+2)^2}{4}-n\\geq n(\\alpha-n+1)$ for any $n\\in\\mathbb{N}$. Thus, it holds\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\frac{(\\alpha+2)^2}{4}\\mathrm{Var}[F]-\\mathbb{E}\\left[\\int (D_{x}F)^2 \\;\\mathrm{d}\\lambda(x)\\right]&=\\sum_{n=1}^{\\infty}n!\\lVert f_n\\rVert_n^2\\left(\\frac{(\\alpha+2)^2}{4}-n\\right)\\\\&\\geq \\sum_{n=1}^{\\infty}n!\\lVert f_n\\rVert_n^2n(\\alpha-n+1)\\geq 0,\r\n\t\t\t\\end{align*}\r\n\t\t\twhich provides the lower bound for the variance in \\eqref{prop} for $\\alpha>0$.\r\n\t\t\t\t\r\n\t\t\tFor $\\alpha=0$ we have that $D_{x,y}F=0$ almost surely for $\\lambda$-a.e.\\ $x,y\\in\\mathbb{X}$. Hence, all difference operators of order greater than or equal to $2$ vanish almost surely for $\\lambda$-a.e.\\ $x,y\\in\\mathbb{X}$. Therefore, $\\lVert f_n\\rVert_n=0$ for all $n\\in\\mathbb{N}$ with $n\\geq 2$. It follows from the representation of the difference operator in terms of the kernels of the Fock space representation (see e.g. \\cite[Theorem 3]{L16}) that $D_xF=f_1(x)$ almost surely for $\\lambda$-a.e.\\ $x\\in\\mathbb{X}$, which provides the bound in Theorem \\ref{thm:varbound} for $\\alpha=0$.\r\n\t\t\\end{proof}\r\n\r\n\\begin{rema}\r\nNote that Fock space representations also exist for functionals of isonormal Gaussian processes and for functionals of Rademacher sequences (i.e.\\ sequences of independent random variables with values $\\pm 1$). For these one can also define operators $D$ and $D^2$ whose Fock space representations are as in the Poisson case. 
Since our proof of Theorem \\ref{thm:varbound} only requires the Fock space representations of $F$, $DF$ and $D^2F$, the statement of Theorem \\ref{thm:varbound} continues to hold for functionals of isonormal Gaussian processes and for functionals of Rademacher sequences if we rewrite the integrals with respect to $\\lambda$ in a proper way. For more details on the Fock space representations and the operators $D$ and $D^2$ we refer the reader to, for example, \\cite{NP12} for the Gaussian case and \\cite{KRT17} for the Rademacher case.\r\n\\end{rema}\t\t\r\n\t\r\n\t\t\\section{Spatial random graphs}\\label{sec:spatial_geometric_graphs}\r\n\t\t\r\n\t\tIn the following sections we apply our main result to problems from stochastic geometry. Therefore, we interpret Poisson processes as collections of random points in $\\mathbb{X}$, which is why we write from now on for $A\\subseteq\\mathbb{X}$ under abuse of notation\r\n\t\t\\begin{align*}\r\n\t\t\t\\eta\\cup A=\\eta+\\sum_{x\\in A}\\delta_x.\r\n\t\t\\end{align*}\r\n\t\tAnalogously, we use $\\eta\\cap A$ and $\\eta\\backslash A$.\r\n\t\tThroughout this paper, we denote by $\\lambda_d$ the $d$-dimensional Lebesgue measure and by $\\kappa_{d}$ the volume of the $d$-dimensional unit ball for $d\\geq 1$. The $d$-dimensional closed ball with centre $x$ and radius $r$ is denoted by $B^d(x,r)$.\r\n\t\t\r\n\t\tLet $W\\subset \\mathbb{R}^d$ be a non-empty compact convex set with $\\lambda_d(W)>0$. For $s\\geq 1$ let $\\eta_s$ be a homogeneous Poisson process on $W$ with intensity $s$, i.e.\\ a Poisson process on $\\mathbb{R}^d$ with intensity measure $\\lambda=s\\lambda_d|_{W}$, where $\\lambda_d|_{W}$ denotes the restriction of the Lebesgue measure to $W$. In the following we study the asymptotic behaviour as $s\\to\\infty$.\r\n\t\t\\subsection{Random geometric graph}\r\n\t\t\r\n\t\t In this section we consider the vector of degree counts and the vector of component counts of a random geometric graph. For both examples we know from \\cite[Section 3.2]{SY21} that, after centering and with a scaling of $s^{-1/2}$, they fulfil a quantitative central limit theorem in $d_2$- and $d_{convex}$- distance if the corresponding asymptotic covariance matrix is positive definite. In the following we show that the asymptotic covariance matrix is indeed positive definite.\r\n\t\t \r\n\t\t Let $G_{r_s}$ denote the random geometric graph that is generated by $\\eta_s$ and has radius $r_s=\\varrho s^{-1/d}$ for a fixed $\\varrho>0$, i.e.\\ the vertex set of the graph is $\\eta_s$ and two distinct vertices $v_1, v_2\\in \\eta_{s}$ are connected by an edge if $\\lVert v_1-v_2\\rVert\\leq r_s$.\r\n\t\t For $j\\in\\mathbb{N}_0$ let $V_j^{r_s}$ be the number of vertices of degree $j$ in $G_{r_s}$, i.e.\\\r\n\t\t \\begin{align*}\r\n\t\t \tV_j^{r_s}=\\sum_{y\\in\\eta_s}\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\},\r\n\t\t \\end{align*}\r\n\t\t where $\\mathrm{deg}(y,\\eta_s)$ stands for the degree of $y$ in $G_{r_s}$. 
Moreover, let $C_j^{r_s}$ denote the number of components of size $j$ in $G_{r_s}$, i.e.\\\r\n\t\t \\begin{align*}\r\n\t\t \tC_j^{r_s}=\\frac{1}{j}\\sum_{y\\in\\eta_s}\\mathbbm{1}\\{ \\lvert\\mathrm{C}(y,\\eta_s)\\rvert=j\\},\r\n\t\t \\end{align*}\r\n\t\t where $\\lvert\\mathrm{C}(y,\\eta_s)\\rvert$ is the number of vertices of the component $C(y,\\eta_s)$ of $y$ in $G_{r_s}$.\r\n\t\t\\begin{theorem}\r\n\t\t\t\\label{theorem:random_geometric_graph}\r\n\t\t\t\\begin{enumerate}\r\n\t\t\t\t\\item [a)]\t\tFor $s\\to\\infty$ the asymptotic covariance matrix of the vector of degree counts $\\frac{1}{\\sqrt{s}}(V_{j_1}^{r_s},\\dots,V_{j_n}^{r_s})$ for distinct $j_i\\in\\mathbb{N}_0$, $i\\in\\{1,\\dots,n\\}$, is positive definite, i.e.\\ for any $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s$ sufficiently large\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\mathrm{Var}\\left[\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{r_s}\\right]\\geq cs.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\t\\item [b)]\r\n\t\t\t\tFor $s\\to\\infty$ the asymptotic covariance matrix of the vector of component counts $\\frac{1}{\\sqrt{s}}(C_{j_1}^{r_s},\\dots,C_{j_n}^{r_s})$ for distinct $j_i\\in\\mathbb{N}_0$, $i\\in\\{1,\\dots,n\\}$, is positive definite, i.e.\\ for any $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s$ sufficiently large\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\mathrm{Var}\\left[\\sum_{i=1}^{n}\\alpha_iC_{j_i}^{r_s}\\right]\\geq cs.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\\end{enumerate}\r\n\t\t\\end{theorem}\r\n\t\tBefore we prove the theorem, we introduce the following lemma that provides condition \\eqref{condition}. It gives an estimate for the expected integral of the squared second-order difference operator of a stabilising Poisson functional. We call a Poisson functional $F_s$ stabilising if it can be written as a sum of scores, i.e.\\\r\n\t\t\\begin{align}\r\n\t\t\t\\label{eq:sum_of_scores}\r\n\t\t\tF_s=F_s(\\eta_s)=\\sum_{x\\in\\eta_s}\\xi_s(x,\\eta_s),\r\n\t\t\\end{align} \r\n\t\twhere the scores $\\xi_s$ are exponentially stabilising, fulfil a moment condition and decay exponentially fast with distance to a set $K$. For details on stabilising Poisson functionals and definitions see Section \\ref{appendix:stabilising_functionals}. 
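\r\n\r\n\t\tTo indicate how this lemma will be combined with Theorem \\ref{thm:varbound} in the proofs below, suppose, only for illustration, that for Poisson functionals $F_s^{(1)},\\dots,F_s^{(n)}$ as in \\eqref{eq:sum_of_scores} and a given $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there are constants $\\tilde{c},\\hat{c}>0$ such that for $s$ sufficiently large\r\n\t\t\\begin{align*}\r\n\t\t\t\\mathbb{E}\\Big[\\int_W\\int_W \\Big(\\sum_{i=1}^{n}\\alpha_iD_{x,y}^2F_{s}^{(i)}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\Big]&\\leq \\tilde{c}s\\\\\r\n\t\t\t\\text{and}\\quad \\mathbb{E}\\Big[\\int_W \\Big(\\sum_{i=1}^{n}\\alpha_iD_{x}F_{s}^{(i)}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\Big]&\\geq \\hat{c}s.\r\n\t\t\\end{align*}\r\n\t\tThen the left-hand side of \\eqref{condition} is at most $\\tilde{c}/\\hat{c}$ times its right-hand side for the functional $\\sum_{i=1}^{n}\\alpha_iF_{s}^{(i)}$, so that Theorem \\ref{thm:varbound} applies and the bound \\eqref{prop} yields\r\n\t\t\\begin{align*}\r\n\t\t\t\\mathrm{Var}\\Big[\\sum_{i=1}^{n}\\alpha_iF_{s}^{(i)}\\Big]\\geq \\frac{4}{(\\tilde{c}/\\hat{c}+2)^2}\\,\\hat{c}s\r\n\t\t\\end{align*}\r\n\t\tfor $s$ sufficiently large, which is of the order $s$ as claimed in the theorems of this and the following sections. The upper bound $\\tilde{c}s$ is provided by the following lemma, while the lower bound $\\hat{c}s$ is established separately in each application; the constants $\\tilde{c},\\hat{c}$ only serve this illustration.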
\r\n\t\r\n\t\t\\begin{lemma}\r\n\t\t\t\\label{lemma:second_difference_operator}\r\n\t\t\tLet $F_s^{(1)},\\dots,F_s^{(n)}$ be Poisson functionals on $\\eta_s$, which can be written in the form of \\eqref{eq:sum_of_scores} and whose corresponding scores $\\xi_s^{(1)},\\dots,\\xi_s^{(n)}$ satisfy a $(4+p)$-th moment condition for $p>0$ and are exponentially stabilising.\r\n\t\t\tThen, for any $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s\\geq 1$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathbb{E}\\Big[\\int_W\\int_W \\Big(\\sum_{i=1}^{n}\\alpha_iD_{x,y}^2F_{s}^{(i)}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\Big]\r\n\t\t\t\t&\\leq c s.\r\n\t\t\t\\end{align*}\r\n\t\t\\end{lemma}\r\n\t\\begin{proof}\r\n\t\tWe can apply \\cite[Lemma 5.5 and Lemma 5.9]{LSY19}, i.e.\\ for $i\\in\\{1,\\dots,n\\}$ and constants $\\varepsilon\\in(4,4+p)$, $\\beta>0$ there exist constants $C_\\varepsilon, C_\\beta>0$ such that \r\n\t\t\\begin{align}\r\n\t\t\t\\mathbb{E}\\lvert D_{x}F_{s}^{(i)}(\\eta_s\\cup A)\\rvert^{\\varepsilon}\\leq C_\\varepsilon\r\n\t\t\t\\label{lemma5.5}\r\n\t\t\\end{align}\r\n\t\tfor $A\\subset W$ with $\\lvert A\\rvert\\leq 1$, $x\\in W$ and $s\\geq 1$, where $\\lvert A\\rvert$ denotes the cardinality of $A$, and\r\n\t\t\\begin{align}\r\n\t\t\ts\\int_W \\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^\\beta\\;\\mathrm{d}y\\leq C_\\beta\r\n\t\t\t\\label{lemma5.9}\r\n\t\t\\end{align}\r\n\t\tfor $s\\geq1$ and $x\\in W$. Fix an $\\varepsilon\\in(4,4+p)$.\r\n\t\tUsing \\eqref{lemma5.5}, Hölder's inequality for $\\frac{\\varepsilon}{2}$ and $q=(1-\\frac{2}{\\varepsilon})^{-1}$ and Jensen's inequality provides\r\n\t\t\\begin{align*}\r\n\t\t\t\\mathbb{E}\\lvert D_{x,y}^2F_{s}^{(i)}\\rvert^2&=\\mathbb{E}\\left[\\lvert D_{x,y}^2F_{s}^{(i)}\\rvert^2\\mathbbm{1}\\{D_{x,y}^2F_{s}^{(i)}\\neq 0\\}\\right]\\\\\r\n\t\t\t&\\leq \t(\\mathbb{E}\\lvert D_{x,y}^2F_{s}^{(i)}\\rvert^{\\varepsilon})^{2/\\varepsilon}\\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^{1/q}\\\\\r\n\t\t\t&=\t(\\mathbb{E}\\lvert D_{x}F_{s}^{(i)}(\\eta_s\\cup\\{y\\})-D_{x}F_{s}^{(i)}(\\eta_s)\\rvert^{\\varepsilon})^{2/\\varepsilon}\\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^{1/q}\\\\\r\n\t\t\t&\\leq \t\\left(2^{\\varepsilon-1}\\left(\\mathbb{E}\\lvert D_{x}F_{s}^{(i)}(\\eta_s\\cup\\{y\\})\\rvert^{\\varepsilon}+\\mathbb{E}\\lvert D_{x}F_{s}^{(i)}(\\eta_s)\\rvert^{\\varepsilon}\\right)\\right)^{2/\\varepsilon}\\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^{1/q}\\\\\r\n\t\t\t&\\leq 4C_\\varepsilon^{2/\\varepsilon}\\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^{1/q}\r\n\t\t\\end{align*}\r\n\t\tfor $i\\in\\{1,\\dots,n\\}$. 
Therefore, using Jensen's inequality and \\eqref{lemma5.9}, it follows\r\n\t\t\\allowdisplaybreaks\r\n\t\t\\begin{align*}\r\n\t\t\t&\\mathbb{E}\\Big[\\int_W\\int_W \\Big(D_{x,y}^2\\sum_{i=1}^{n}\\alpha_iF_{s}^{(i)}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\Big]\\\\&\r\n\t\t\t\\leq \\int_W\\int_W \t\\mathbb{E}\\Big[n\\sum_{i=1}^{n}\\alpha_i^2(D_{x,y}^2F_{s}^{(i)})^2\\Big]\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\\\&\r\n\t\t\t= n\\sum_{i=1}^{n}\\alpha_i^2\\int_W\\int_W \t\t\\mathbb{E}\\lvert D_{x,y}^2F_{s}^{(i)}\\rvert^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\\\&\r\n\t\t\t\\leq n\\sum_{i=1}^{n}\\alpha_i^2 s\\int_W s\\int_W 4C_\\varepsilon^{2/\\varepsilon}\\mathbb{P}(D_{x,y}^2F_{s}^{(i)}\\neq 0)^{1/q}\\;\\mathrm{d}x\\;\\mathrm{d}y\\\\\r\n\t\t\t&\\leq n\\sum_{i=1}^{n}\\alpha_i^2s\\int_W 4C_\\varepsilon^{2/\\varepsilon}C_{1/q}\\;\\mathrm{d}x\\leq cs\r\n\t\t\\end{align*}\r\n\t\tfor some constant $c>0$, which completes the proof.\r\n\t\\end{proof}\r\n\t\t\\begin{proof}[Proof of Theorem \\ref{theorem:random_geometric_graph}]\r\n\t\t\tFor $x\\in W$ and $j\\in\\mathbb{N}_0$ the difference operators are given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_xV_j^{r_s}=\\mathbbm{1}\\{\\mathrm{deg}(x,\\eta_s\\cup\\{x\\})=j\\}+\\sum_{y\\in \\eta_s}(\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s\\cup\\{x\\})=j\\}-\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\})\r\n\t\t\t\\end{align*}\r\n\t\t\tand \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&D_xC_j^{r_s}=\\frac{1}{j}\\mathbbm{1}\\{\\lvert \\mathrm{C}(x,\\eta_s\\cup\\{x\\})\\rvert=j\\}+\\frac{1}{j}\\sum_{y\\in\\eta_s}(\\mathbbm{1}\\{\\lvert\\mathrm{C}(y,\\eta_s\\cup\\{x\\})\\rvert=j\\}-\\mathbbm{1}\\{\\lvert \\mathrm{C}(y,\\eta_s)\\rvert=j\\}).\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $m=\\mathrm{argmax}_{i\\in\\{1,\\dots,n\\}:\\alpha_i\\neq 0}j_i$ and $x\\in W$.\r\n\t\t\tFor a) we consider configurations where \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\eta_s\\left(B^d\\left(x,\\frac{r_s}{2}\\right)\\right)=j_m+1 \\quad\\text{ and }\\quad \\eta_s\\Big(B^d\\Big(x,\\frac{3}{2}r_s\\Big)\\Big\\backslash B^d\\Big(x,\\frac{r_s}{2}\\Big)\\Big)=0.\r\n\t\t\t\\end{align*} \r\n\t\t\tThen, it follows for any $y\\in\\eta_s$ with $y\\in B^d(x,\\frac{r_s}{2})\\cap W$ that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{deg}(y,\\vartheta)=\\begin{cases}\r\n\t\t\t\t\tj_m, &\\text{ for }\\vartheta=\\eta_s,\\\\\r\n\t\t\t\t\tj_m+1, &\\text{ for }\\vartheta=\\eta_s\\cup\\{x\\}.\r\n\t\t\t\t\\end{cases}\r\n\t\t\t\\end{align*}\r\n\t\t\tThe degrees of all the other points are not affected by adding $x$. Thus, in this situation only the numbers of points with degree $j_m$ and $j_m+1$ change. Due to the choice of $m$, we have\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\left\\lvert D_x\\left(\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{r_s}\\right)\\right\\rvert=\\left\\lvert\\sum_{i=1}^{n}\\alpha_iD_xV_{j_i}^{r_s}\\right\\rvert=\\lvert\\alpha_mD_xV_{j_m}^{r_s}\\rvert=\\left\\lvert\\alpha_{m}(-(j_m+1))\\right\\rvert\\geq \\lvert\\alpha_{m}\\rvert.\r\n\t\t\t\\end{align*}\r\n\t\t\tFor b) we consider configurations where \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\eta_s\\Big(B^d\\Big(x,\\frac{r_s}{2}\\Big)\\Big)=j_m \\quad\\text{ and }\\quad \\eta_s\\Big(B^d\\Big(x,\\frac{3}{2}r_s\\Big)\\Big\\backslash B^d\\Big(x,\\frac{r_s}{2}\\Big)\\Big)=0.\r\n\t\t\t\\end{align*}\r\n\t\t\tIt follows that $C_{j_m}^{r_s}$ decreases by $1$ by adding $x$ and $C_{j_m+1}^{r_s}$ increases by 1. The other component counts are not affected. 
Because of the choice of $m$, it holds\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\left\\lvert D_x\\left(\\sum_{i=1}^{n}\\alpha_iC_{j_i}^{r_s}\\right)\\right\\rvert=\\left\\lvert\\sum_{i=1}^{n}\\alpha_iD_xC_{j_i}^{r_s}\\right\\rvert=\\lvert\\alpha_mD_xC_{j_m}^{r_s}\\rvert=\\left\\lvert\\alpha_{m}\\right\\rvert.\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $A_s=\\{x\\in W:B^d(x,\\frac{r_s}{2})\\subset W\\}$. Then, for $F_{j_i}^{r_s}=V_{j_i}^{r_s}$ or $F_{j_i}^{r_s}=C_{j_i}^{r_s}$ for $i\\in\\{1,\\dots, n\\}$ and \r\n\t\t\t$$\r\n\t\t\tk=\\begin{cases}\r\n\t\t\t\tj_m+1, &\\text{ for }F_{j_i}^{r_s}=V_{j_i}^{r_s},\\\\\r\n\t\t\t\tj_m, &\\text{ for }F_{j_i}^{r_s}=C_{j_i}^{r_s},\r\n\t\t\t\\end{cases}\r\n\t\t\t$$\r\n\t\t\tit follows for $s$ sufficiently large such that $\\lambda_d(A_s)\\geq\\frac{\\lambda_d(W)}{2}$ that\r\n\t\t\t\\allowdisplaybreaks\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\int_W \\Big(\\sum_{i=1}^{n}\\alpha_iD_xF_{j_i}^{r_s}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\geq s\\alpha_{m}^2\\int_W\\mathbb{P}\\Big(\\Big\\lvert \\sum_{i=1}^{n}\\alpha_iD_xF_{j_i}^{r_s}\\Big\\rvert\\geq\\rvert\\alpha_{m}\\rvert\\Big)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\alpha_{m}^2\\int_{A_s}\\mathbb{P}\\Big(\\eta_s\\Big(B^d\\Big(x,\\frac{r_s}{2}\\Big)\\Big)=k,\\eta_s\\Big(B^d\\Big(x,\\frac{3}{2}r_s\\Big)\\Big\\backslash B^d\\Big(x,\\frac{r_s}{2}\\Big)\\Big)=0\\Big)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\alpha_{m}^2\\int_{A_s}\\frac{(s\\kappa_dr_s^d)^{k}}{2^{dk}k!}e^{-s\\kappa_dr_s^d/2^d}e^{-s\\kappa_d(3^d-1)r_s^d/2^d}\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\alpha_{m}^2\\frac{\\lambda_d(W)}{2}\\frac{(\\kappa_d\\varrho^d)^{k}}{2^{dk}k!}e^{-\\kappa_d3^d\\varrho^d/2^d} =:c\\cdot s,\r\n\t\t\t\\end{align*}\r\n\t\t\twhere $c>0$ depends on $W,\\alpha,\\varrho, k$ and $d$.\r\n\t\t\t\r\n\t\t\tBoth functionals can be written as sums of scores as in \\eqref{eq:sum_of_scores}. For $j\\in\\mathbb{N}_0$, $y\\in \\eta_s$ and $s\\geq 1$ the score for the degree count of degree $j$ is given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\xi_s(y,\\eta_s)=\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\}\r\n\t\t\t\\end{align*}\r\n\t\t\tand for $j\\in\\mathbb{N}$, $y\\in\\eta_s$ and $s\\geq 1$ the score for the number of components of size $j$ is\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\xi_s(y,\\eta_s)=\\frac{1}{j}\\mathbbm{1}\\{\\lvert \\mathrm{C}(y,\\eta_s)\\rvert=j\\}.\r\n\t\t\t\\end{align*}\r\n\t\t\tThese scores clearly fulfil a $(4+p)$-th moment condition and are \r\n\t\t\tby \\cite[proofs of Theorem 3.5 (b) and Theorem 3.6 (b)]{SY21} exponentially stabilising. Therefore, we can apply Lemma \\ref{lemma:second_difference_operator}, which completes together with Theorem \\ref{thm:varbound} the proof.\r\n\t\t\\end{proof}\r\n\t\t\r\n\t\t\\subsection{$k$-nearest neighbour graph}\r\n\t\t\tCentral limit theorems for the total edge length of a $k$-nearest neighbour graph of a Poisson process are derived in e.g.\\ \\cite{AB93,BY05,LSY19,LPS16,PY01,PY05,SY21}. The first quantitative result can be found in \\cite{AB93}. This convergence rate was further improved in \\cite{PY05} before in \\cite{LPS16} the presumably optimal rate was shown. In \\cite{SY21} this result was transferred to the multivariate case of a vector of edge length functionals but it was left open to show in general that its asymptotic covariance matrix is positive definite. 
For edge length functionals of nonnegative powers this is proven in the following.\r\n\t\t\t\r\n\t\t We consider the $k$-nearest neighbour graph for $k\\in\\mathbb{N}$ that is generated by the Poisson process $\\eta_s$, i.e.\\ the undirected graph with vertex set $\\eta_s$, where each vertex is connected to its $k$-nearest neighbours. The set of all $k$-nearest neighbours of $v_1\\in\\eta_s$ contains almost surely all $v_2\\in\\eta_s\\backslash\\{v_1\\}$ for which $\\lVert v_1-v_2\\rVert\\geq\\lVert v_1-x\\rVert$ for at most $k-1$ vertices $x\\in\\eta_s\\backslash\\{v_1\\}$ or, equivalently, $\\eta_{s}(B^d(v_1,\\lVert v_1-v_2\\rVert)\\backslash\\{v_1\\})\\leq k-1$.\r\n\t\tFor $q\\in[0,\\infty)$ let $L_q$ denote the edge length functional of power $q$ of the $k$-nearest neighbour graph generated by $\\eta_s$, which is defined by\r\n\t\\begin{align*}\r\n\t\tL_q=\\frac{1}{2}\\sum_{(y,z)\\in\\eta_{s,\\neq}^2}\\mathbbm{1}\\{z\\in N(y,\\eta_s) \\text{ or }y\\in N(z,\\eta_s)\\}\\lVert y-z\\rVert^q,\r\n\t\\end{align*}\r\n\twhere $\\eta_{s,\\neq}^2$ denotes the set of all pairs of distinct points of $\\eta_{s}$ and $N(y,\\eta_s)$ is the set of all $k$-nearest neighbours of $y$ in the $k$-nearest neighbour graph generated by $\\eta_s$. Let $F_q=s^{q/d}L_q$ be its scaled version.\r\n\t\t\\begin{theorem}\r\n\t\t\t\\label{theorem:knn_edge_length}\r\n\t\t\tFor $s\\to\\infty$ the asymptotic covariance matrix of $\\frac{1}{\\sqrt{s}}(F_{q_1},\\dots,F_{q_n})$ for distinct $q_i\\geq 0$, $i\\in\\{1,\\dots,n\\}$, is positive definite, i.e.\\ for any $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s$ sufficiently large\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{Var}\\left[\\sum_{i=1}^n\\alpha_iF_{q_i}\\right]\\geq cs.\r\n\t\t\t\\end{align*}\r\n\t\t\\end{theorem} In order to prove this theorem, we need the following lemma, which considers a slightly more general situation since it will also be employed in a further proof.\r\n\t\t\\begin{lemma}\r\n\t\t\t\\label{lemma_By}\r\n\t\t\tLet $k\\in\\mathbb{N}$ and $j\\geq 1$ be fixed. Then there exist constants $c_1,c_2>0$ depending on $k,j,d$ and $W$ such that for all $\\varepsilon>0$ and $x\\in W$ with $B^d(x,2(j+1)\\varepsilon)\\subset W$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathbb{P}(\\exists y\\in \\eta_s\\backslash B^d(x, j\\varepsilon):\\eta_s(A_{j,\\varepsilon}(x,y))\\leq k-1)\\leq c_1e^{-sc_2\\varepsilon^d},\r\n\t\t\t\\end{align*}\r\n\t\twhere $A_{j,\\varepsilon}(x,y)=(B^d(y,\\lVert x-y\\rVert-(j-1)\\varepsilon)\\cap W)\\backslash (B^d(x,j\\varepsilon)\\cup\\{y\\})$.\r\n\t\t\\end{lemma}\r\n\t\t\\begin{proof}\r\n\t\t\tLet $x\\in W$ with $B^d(x,2(j+1)\\varepsilon)\\subset W$. Then, for $y\\in W$ with $j\\varepsilon< \\lVert x-y\\rVert\\leq(j+1)\\varepsilon$ we have that $B^d(y,\\lVert x-y\\rVert)\\subset W$. Therefore, since $y\\notin B^d(x,j\\varepsilon)$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lambda_d(A_{j,\\varepsilon}(x,y))\\geq \\frac{1}{2}\\kappa_{d}(\\lVert x-y\\rVert-(j-1)\\varepsilon)^d. \r\n\t\t\t\\end{align*}\r\n\t\t\tFor $y\\in W$ with $\\lVert x-y\\rVert> (j+1)\\varepsilon$ it holds that $\\lVert x-y\\rVert-j\\varepsilon\\geq \\frac{1}{2}(\\lVert x-y\\rVert -(j-1)\\varepsilon)$. Moreover, $(B^d(y,\\lVert x-y\\rVert-j\\varepsilon)\\cap W)\\backslash\\{y\\}\\subset A_{j,\\varepsilon}(x,y)$. 
Hence, with \\cite[Lemma 7.4]{LPS16} there is a constant $c_W>0$ only depending on $W$ such that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lambda_d(A_{j,\\varepsilon}(x,y))&\\geq \\lambda_d(B^d(y,\\lVert x-y\\rVert-j\\varepsilon)\\cap W)\\geq c_W(\\lVert x-y\\rVert-j\\varepsilon)^d\\\\&\\geq c_W\\Big(\\frac{1}{2}(\\lVert x-y\\rVert -(j-1)\\varepsilon)\\Big)^d.\r\n\t\t\t\\end{align*}\r\n\t\t\tAltogether, for $y\\in W\\backslash B^d(x,j\\varepsilon)$ it follows\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\lambda_d(A_{j,\\varepsilon}(x,y))\\geq c(\\lVert x-y\\rVert-(j-1)\\varepsilon)^d\r\n\t\t\t\\end{align*}\r\n\t\t\tfor some constant $c>0$.\r\n\t\t\tFor $t\\in\\mathbb{N}_0$ there exist constants $\\tilde{c}_1,\\tilde{c}_2>0$ such that $z^te^{-z}\\leq \\tilde{c}_1e^{-\\tilde{c}_2z}$ for all $z>0$.\r\n\t\t\tHence, using the Mecke formula and spherical coordinates, we get \r\n\t\t\t\\begin{align*}\r\n\t\t\t&\\mathbb{P}(\\exists y\\in \\eta_s\\backslash B^d(x, j\\varepsilon):\\eta_s(A_{j,\\varepsilon}(x,y))\\leq k-1)\\\\\r\n\t\t\t&\\leq \\mathbb{E}\\Bigg[\\sum_{y\\in\\eta_{s}\\backslash B^d(x, j\\varepsilon)}\\mathbbm{1}\\{\\eta_s(A_{j,\\varepsilon}(x,y))\\leq k-1\\}\\Bigg]\\\\\r\n\t\t\t&\\leq s\\int_{\\mathbb{R}^d\\backslash B^d(x,j\\varepsilon)}\\mathbb{P}(\\eta_s(A_{j,\\varepsilon}(x,y))\\leq k-1)\\;\\mathrm{d}y\\\\\r\n\t\t\t&=s\\int_{\\mathbb{R}^d\\backslash B^d(x,j\\varepsilon)}\\sum_{i=0}^{k-1}\\frac{\\lambda(A_{j,\\varepsilon}(x,y))^i}{i!}e^{-\\lambda(A_{j,\\varepsilon}(x,y))}\\;\\mathrm{d}y\\\\\r\n\t\t\t&\\leq s\\int_{\\mathbb{R}^d\\backslash B^d(x,j\\varepsilon)}\\hat{c}_1e^{-\\hat{c}_2\\lambda(A_{j,\\varepsilon}(x,y))}\\;\\mathrm{d}y\\\\\r\n\t\t\t&\\leq \\int_{\\mathbb{R}^d\\backslash B^d(x,j\\varepsilon)}\\hat{c}_1se^{-\\hat{c}_2sc(\\lVert x-y\\rVert-(j-1)\\varepsilon)^d}\\;\\mathrm{d}y\\\\\r\n\t\t\t&=d\\kappa_d\\int_{\\varepsilon}^\\infty \\hat{c}_1s(r+(j-1)\\varepsilon)^{d-1}e^{-\\hat{c}_2scr^d}\\;\\mathrm{d}r\\leq c_1e^{-sc_2\\varepsilon^d}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor suitable constants $\\hat{c}_1,\\hat{c}_2,c_1,c_2>0$.\r\n\t\t\\end{proof}\r\n\t\t\\begin{proof}[Proof of Theorem \\ref{theorem:knn_edge_length}]\r\n\t\t\tLet $e_i$ denote the $d$-dimensional standard unit vector in the $i$-th direction. For $\\varepsilon>0$, $x\\in W$ with $B^d(x,4\\varepsilon)\\subset W$ and $\\hat{x}=x+\\frac{3}{4}\\varepsilon e_1$, we consider configurations where $\\eta_s(B^d(\\hat{x},\\varepsilon/4))=k+1$, $\\eta_s(B^d(x,\\varepsilon)\\backslash B^d(\\hat{x},\\varepsilon/4))=0$ and $\\eta_s(A_{1,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,\\varepsilon)$, where $A_{1,\\varepsilon}(x,y)$ is defined as in Lemma \\ref{lemma_By}. 
Then, for $q\\geq 0$ the difference operator of $F_q$ is given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_xF_q&=s^{q/d}\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\lVert x-y\\rVert^q.\r\n\t\t\t\\end{align*}\r\n\t\t\tInserting $j=1$ in Lemma \\ref{lemma_By} provides\r\n\t\t\\begin{align*}\r\n\t\t\t\\mathbb{P}(\\exists y\\in \\eta_s\\backslash B^d(x, \\varepsilon):\\eta_s(A_{1,\\varepsilon}(x,y))\\leq k-1)\\leq c_1e^{-sc_2\\varepsilon^d}\r\n\t\t\\end{align*}\r\n\t\tfor some constants $c_1,c_2>0$.\r\n\t\t\r\n\t\t\tNow, let $m=\\mathrm{argmax}_{i\\in\\{1,\\dots,n\\}:\\alpha_i\\neq 0}q_i$ and assume without loss of generality $\\alpha_m>0$.\r\n\t\t\tIf $\\alpha_{i}\\geq 0$ for all $i\\in\\{1,\\dots,n\\}$, we choose $\\varepsilon=\\bar{c}s^{-1/d}$ with $\\bar{c}\\geq 1$ large enough such that we have for the configurations mentioned above\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iF_{q_i}\\geq \\alpha_ms^{q_m/d}\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\lVert x-y\\rVert^{q_m}\\geq \\alpha_mk\\left(\\frac{s^{1/d}\\varepsilon}{2}\\right)^{q_m}\\geq 1\r\n\t\t\t\\end{align*}\r\n\t\t\tand $c_1e^{-sc_2\\varepsilon^d}<\\frac{1}{2}$.\r\n\t\t\tOtherwise, let $\\ell=\\mathrm{argmax}_{i\\in\\{1,\\dots,n\\}:\\alpha_i<0}q_i$. Then, $q_m>q_\\ell$ and it follows for the configurations mentioned above for $s^{1/d}\\varepsilon\\geq1$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&D_x\\sum_{i=1}^n\\alpha_iF_{q_i}=\\sum_{i=1}^n\\alpha_is^{q_i/d}\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\lVert x-y\\rVert^{q_i}\\\\\r\n\t\t\t\t&\\geq \\alpha_ms^{q_m/d}\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\lVert x-y\\rVert^{q_m}-\\sum\\limits_{\\substack{i\\in \\{1,\\dots,n\\}: \\\\ \\alpha_i<0}}(-\\alpha_i)s^{q_i/d}\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\lVert x-y\\rVert^{q_i}\\\\\r\n\t\t\t\t&\\geq \\alpha_m\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}\\left(s^{1/d}\\lVert x-y\\rVert\\right)^{q_m}-\\sum\\limits_{\\substack{i\\in \\{1,\\dots,n\\}: \\\\ \\alpha_i<0}}(-\\alpha_i)\\sum_{y\\in N(x,\\eta_s\\cup\\{x\\})}(s^{1/d}\\varepsilon)^{q_i}\\\\\r\n\t\t\t\t&\\geq \\alpha_mk\\left(\\frac{s^{1/d}\\varepsilon}{2}\\right)^{q_m}-\\sum\\limits_{\\substack{i\\in \\{1,\\dots,n\\}: \\\\ \\alpha_i<0}}(-\\alpha_i)k(s^{1/d}\\varepsilon)^{q_\\ell}\\\\\r\n\t\t\t\t&\\geq k(s^{1/d}\\varepsilon)^{q_\\ell}\\Bigg(\\alpha_m\\frac{1}{2^{q_m}}\\left(s^{1/d}\\varepsilon\\right)^{q_m-q_\\ell}-\\sum\\limits_{\\substack{i\\in \\{1,\\dots,n\\}: \\\\ \\alpha_i<0}}(-\\alpha_i)\\Bigg).\r\n\t\t\t\\end{align*}\r\n\t\t\tIn this case, choose $\\varepsilon= s^{-1/d}\\bar{c}>0$ with $\\bar{c}\\geq 1$ large enough such that $c_1e^{-sc_2\\varepsilon^d}<\\frac{1}{2}$ and\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iF_{q_i}\\geq\\alpha_m\\frac{1}{2^{q_m}}\\left(s^{1/d}\\varepsilon\\right)^{q_m-q_\\ell}-\\sum\\limits_{\\substack{i\\in \\{1,\\dots,n\\}: \\\\ \\alpha_i<0}}(-\\alpha_i)\\geq 1.\r\n\t\t\t\\end{align*} \r\n\t\t\tLet $A_s=\\{x\\in W:B^d(x,4\\varepsilon)\\subset W\\}$. 
Due to the independence of $\\eta_{s}(B^d(\\hat{x},\\varepsilon/4))$, $\\eta_{s}(B^d(x,\\varepsilon)\\backslash B^d(\\hat{x},\\varepsilon/4))$ and $\\eta_{s}(A_{1,\\varepsilon}(x,y))$ for $y\\in\\eta_s\\backslash B^d(x,\\varepsilon)$ and $x\\in A_s$, and by Lemma \\ref{lemma_By}, we have for $s$ large enough such that $\\lambda_d(A_s)\\geq\\frac{\\lambda_d(W)}{2}$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\Big[\\int_W\\Big(D_x\\sum_{i=1}^n\\alpha_iF_{q_i}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\Big]\\geq s\\int_W\\mathbb{P}\\Big(D_x\\sum_{i=1}^n\\alpha_iF_{q_i}\\geq 1 \\Big)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\int_W\\mathbb{P}\\big(\\eta_s(B^d(\\hat{x},\\varepsilon/4))=k+1, \\eta_s(B^d(x,\\varepsilon)\\backslash B^d(\\hat{x},\\varepsilon/4))=0,\\\\&\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\eta_s(A_{1,\\varepsilon}(x,y))\\geq k \\;\\forall y\\in\\eta_s\\backslash B^d(x,\\varepsilon)\\big)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\int_{A_s}\\frac{(s\\kappa_d\\varepsilon^d)^{k+1}}{4^{d(k+1)}(k+1)!}e^{-s\\kappa_d\\varepsilon^d/4^d}e^{-s\\kappa_d\\varepsilon^d(1-1/4^d)}(1-c_1e^{-sc_2\\varepsilon^d})\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\frac{\\lambda_d(W)}{2}\\frac{(\\kappa_d\\bar{c}^d)^{k+1}}{4^{d(k+1)}(k+1)!}e^{-\\kappa_d\\bar{c}^d}\\cdot\\frac{1}{2} =:c_{q,\\alpha,k,W,d}s.\r\n\t\t\t\\end{align*}\r\n\t\t\tOur functionals can be written as sums of scores as in \\eqref{eq:sum_of_scores}. For $y\\in\\eta_s$, $q\\geq0$ and $s\\geq 1$ the corresponding score of $F_q$ is given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\xi_s(y,\\eta_s)=s^{q/d}\\sum_{z\\in N(y,\\eta_s)}\\Big(\\mathbbm{1}\\{y\\in N(z,\\eta_s)\\}\\frac{\\lVert y-z\\rVert^q}{2}+\\mathbbm{1}\\{y\\notin N(z,\\eta_s)\\}\\lVert y-z\\rVert^q\\Big).\r\n\t\t\t\\end{align*} \r\n\t\t\tThe scores $(\\xi_s)_{s\\geq 1}$ fulfil a $(4+p)$-th moment condition (see the proof of \\cite[Theorem 3.1]{LSY19}) and are exponentially stabilising\r\n\t\t\tby \\cite[proof of Theorem 3.1]{SY21}. Therefore, we can apply Lemma \\ref{lemma:second_difference_operator}, which, together with Theorem \\ref{thm:varbound}, completes the proof.\r\n\t\t\\end{proof}\r\n\t\tIn the following we consider a second statistic of $k$-nearest neighbour graphs, namely the number of vertices with a given degree. Similarly to the previous example, it was shown in \\cite[Theorem 3.3]{SY21} that a vector of these degree counts fulfils a quantitative multivariate central limit theorem in $d_2$- and $d_{convex}$-distance if its asymptotic covariance matrix is positive definite. \r\n\t\tFor $j\\in\\mathbb{N}_0$ let $V_j^{k}$ denote the number of vertices of degree $j$ in the $k$-nearest neighbour graph generated by $\\eta_s$, i.e.\\\r\n\t\\begin{align*}\r\n\t\tV_j^{k}=\\sum_{y\\in\\eta_s}\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\}.\r\n\t\\end{align*}\r\n\t\tWe study the vector $(V_{j_1}^k,\\dots, V_{j_n}^k)$ for distinct $j_i\\geq k$, $i\\in\\{1,\\dots,n\\}$. By \\cite[Lemma 8.4]{Y98} the vertices of a $k$-nearest neighbour graph have bounded degree. 
Therefore, we consider $j_i\\in \\{k,k+1,\\dots,k_{\\mathrm{max}}\\}$ for $i\\in\\{1,\\dots,n\\}$, where $k_{\\mathrm{max}}$ denotes the maximal possible degree that occurs with a positive probability.\r\n\t\t\\begin{theorem}\r\n\t\t\r\n\t\t\tFor $d\\geq 2$, $n\\leq k_\\mathrm{max}-k+1$ and $s\\to\\infty$ the asymptotic covariance matrix of $\\frac{1}{\\sqrt{s}}(V_{j_1}^k,\\dots, V_{j_n}^k)$ for distinct $j_i\\in \\{k,k+1,\\dots,k_{\\mathrm{max}}\\}$, $i\\in\\{1,\\dots,n\\}$, is positive definite, i.e.\\ for any $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s$ sufficiently large\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{Var}\\left[\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{k}\\right]\\geq cs.\r\n\t\t\t\\end{align*}\r\n\t\t\t\r\n\t\t\\end{theorem}\r\n\t\t\\begin{proof}\r\n\t\t\tFirst note that the degrees $j_1,\\dots,j_n$ are chosen in such a way that they can occur in a $k$-nearest neighbour graph. A vertex can have $k$ neighbours if it is only connected to its $k$ nearest neighbours and can have up to $k_{\\mathrm{max}}$ neighbours by the definition of $k_{\\mathrm{max}}$. All degrees in between can occur as well, as can be seen from the following construction. Assume we have a configuration where $x$ has $k_{\\mathrm{max}}$ neighbours. Then we delete $1\\leq t\\leq k_{\\mathrm{max}}-k$ vertices which are connected to $x$ but are not one of the $k$ nearest neighbours of $x$, and all other vertices that are not connected to $x$. Consequently, we obtain a configuration where $x$ has degree $k_{\\mathrm{max}}-t$.\r\n\t\t\tThis means that $\\mathbb{P}(\\mathrm{deg}(x,\\beta_{j_i}\\cup\\{x\\})=j_i)>0$ for $i\\in\\{1,\\dots,n\\}$, where $\\beta_{j_i}$ denotes a binomial point process of $j_i$ independent random points uniformly distributed in $B^d(0,1)$. Obviously, these probabilities do not change if we take a binomial point process on any other ball.\r\n\t\t\t\r\n\t\t\tThe difference operator of $V_j^{k}$ is given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_xV_j^{k}=\\mathbbm{1}\\{\\mathrm{deg}(x,\\eta_s\\cup\\{x\\})=j\\}+\\sum_{y\\in\\eta_s}(\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s\\cup\\{x\\})=j\\}-\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\})\r\n\t\t\t\\end{align*}\r\n\t\t\tfor $x\\in W$.\r\n\t\t\tDenote $I=\\{i\\in\\{1,\\dots,n\\}:\\alpha_i\\neq0\\}$ and $m=\\mathrm{argmin}_{i\\in I}j_i$. We can assume $\\alpha_m>0$ without loss of generality. In the following we distinguish several cases, which are illustrated in Figure \\ref{fig:kNN}.\r\n\t\t\t\r\n\t\t\t\\medskip\r\n\t\t\t\r\n\t\t\t\\noindent \\textit{Case 1: $j_m>k$}\\\\\r\n\t\t\tLet $\\varepsilon>0$ and $x\\in W$ with $B^d(x, 8\\varepsilon)\\subset W$.\r\n\t\t\tWe consider configurations where $\\eta_s(B^d(x,\\varepsilon))=j_m,\\eta_s(B^d(x,3\\varepsilon)\\backslash B^d(x,\\varepsilon))=0$ and $\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)$ with $A_{3,\\varepsilon}(x,y)$ defined as in Lemma \\ref{lemma_By}. 
Applying\r\n\t\t\tLemma \\ref{lemma_By} for $j=3$ provides\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathbb{P}(\\exists y\\in \\eta_s\\backslash B^d(x, 3\\varepsilon):\\eta_s(A_{3,\\varepsilon}(x,y))\\leq k-1)\r\n\t\t\t\t&\\leq c_1e^{-sc_2\\varepsilon^d}.\r\n\t\t\t\\end{align*}\r\n\t\t\tNow, choose $\\varepsilon=\\bar{c} s^{-1/d}>0$ for $\\bar{c}>1$ such that $c_1e^{-sc_2\\varepsilon^d}\\leq\\frac{1}{2}$.\r\n\t\t\tThen, $x$ is connected to all $z\\in \\eta_s\\cap B^d(x,\\varepsilon)$ and we have\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iV_{j_i}^k\\geq\\alpha_m.\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $A_s=\\{x\\in W:B^d(x,8\\varepsilon)\\subset W\\}$ and $s$ large enough such that $\\lambda_d(A_s)>\\frac{\\lambda_d(W)}{2}$. Then, using independence properties we have for $p_m=\\mathbb{P}\\left(\\mathrm{deg}(x,\\beta_{j_m}\\cup\\{x\\})=j_m\\right)>0$,\r\n\t\t\t\\allowdisplaybreaks\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\Big[\\int_W \\Big(D_x\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{k}\\Big)^2\\;\\mathrm{d}\\lambda(x)\\Big]\\geq \\alpha_m^2\\int_W\\mathbb{P}\\Big(D_x\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{k}\\geq\\alpha_m\\Big)\\;\\mathrm{d}\\lambda(x)\\\\\r\n\t\t\t\t&\\geq \\alpha_m^2\\int_{A_s}\\mathbb{P}\\left(\\eta_s(B^d(x,\\varepsilon))=j_m,\\eta_s(B^d(x,3\\varepsilon)\\backslash B^d(x,\\varepsilon))=0,\\mathrm{deg}(x,\\eta_s|_{B^d(x,\\varepsilon)}\\cup\\{x\\})=j_m\\right)\\\\\r\n\t\t\t\t&\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\cdot\\mathbb{P}\\left(\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k\\; \\forall y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)\\right)\\;\\mathrm{d}\\lambda(x)\\\\\r\n\t\t\t\t&\\geq s\\frac{\\alpha_m^2}{2} \\int_{A_s}\\mathbb{P}\\left(\\eta_s(B^d(x,\\varepsilon))=j_m,\\eta_s(B^d(x,3\\varepsilon)\\backslash B^d(x,\\varepsilon))=0\\right)\\\\&\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\;\\cdot\\mathbb{P}\\left(\\mathrm{deg}(x,\\eta_s|_{B^d(x,\\varepsilon)}\\cup\\{x\\})=j_m|\\eta_s(B^d(x,\\varepsilon))=j_m\\right)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&=s\\frac{\\alpha_m^2}{2}\\int_{A_s}\\frac{(s\\kappa_d\\varepsilon^d)^{j_m}}{j_m!}e^{-s\\kappa_d\\varepsilon^d}e^{-s\\kappa_d(3^d-1)\\varepsilon^d}\\mathbb{P}\\left(\\mathrm{deg}(x,\\beta_{j_m}\\cup\\{x\\})=j_m\\right)\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\geq s\\frac{\\alpha_m^2}{2}\\frac{(s\\kappa_d\\varepsilon^d)^{j_m}}{j_m!}e^{-s\\kappa_d3^d\\varepsilon^d}p_m\\frac{\\lambda_d(W)}{2}=:c_{\\alpha,k,W,d}s.\r\n\t\t\t\\end{align*}\r\n\t\t\t\\textit{Case 2: $j_m=k$.}\\\\\r\n\t\t\tIf it exists, we denote by $\\ell\\in\\{1,\\dots,n\\}$ the index with $j_\\ell=k+1$. Then,\\begin{align*}\r\n\t\t\t\t\\hat{\\alpha}=\\begin{cases}\r\n\t\t\t\t\t\\alpha_{\\ell}, &\\text{ if $\\ell$ exists,}\\\\\r\n\t\t\t\t\t0, &\\text{ if $\\ell$ does not exist.}\r\n\t\t\t\t\\end{cases}\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $\\varepsilon>0$ and let $x\\in W$ be such that $B^d(x,8\\varepsilon)\\subset W$. We consider four different configurations to deal with all possible vectors $\\alpha=(\\alpha_1,\\dots,\\alpha_n)\\in\\mathbb{R}^n\\backslash\\{0\\}$ (see Figure \\ref{fig:kNN}). 
Let $e_i$ denote the $d$-dimensional standard unit vector in the $i$-th direction.\r\n\t\t\t\\begin{enumerate}\r\n\t\t\t\t\\item \t\\textit{$k\\in\\mathbb{N}$ and $\\alpha_m(1-k)+\\hat{\\alpha} k\\neq 0$}:\\\\ \r\n\t\t\t\tIn this case we consider the event $S_1$ where for $\\hat{x}=x+\\frac{3\\varepsilon}{4} e_1$ we have $\\eta_s(B^d(\\hat{x},\\varepsilon/4))=k+1$, $\\eta_s(B^d(x,3\\varepsilon)\\backslash B^d(\\hat{x},\\varepsilon/4))=0$ and $\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)$. Then it follows\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iV_{j_i}^k=\\alpha_m D_xV_{k}^k+\\hat{\\alpha}D_xV_{k+1}^k=\\alpha_m(1-k)+\\hat{\\alpha} k\\neq 0.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\t\\item \\textit{$k\\geq 3$ and $\\alpha_m(1-k)+\\hat{\\alpha} k= 0$}:\\\\\r\n\t\t\t\tThe condition $\\alpha_m(1-k)+\\hat{\\alpha} k= 0$ implies\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\alpha_m(3-k)+\\hat{\\alpha}(k-2)=2(\\alpha_m-\\hat{\\alpha})=2\\frac{\\alpha_m}{k}\\neq 0.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\tWe consider the event $S_2$ where $\\eta_s(B^d(\\hat{x}_i,\\varepsilon/16))=1$ for $i\\in\\{1,\\dots,4\\}$ with $\\hat{x}_j=x+(-1)^j\\frac{3\\varepsilon}{4}e_1$ for $j\\in\\{1,2\\}$ and $\\hat{x}_j=x+(-1)^j\\frac{3\\varepsilon}{4}e_2$ for $j\\in\\{3,4\\}$, $\\eta_s(B^d(x,\\varepsilon/4))=k-3$, $\\eta_s(B^d(x,3\\varepsilon)\\backslash (B^d(x,\\varepsilon/4)\\cup \\bigcup_{i=1}^4B^d(\\hat{x}_i,\\varepsilon/16)))=0$ and $\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)$. Then we have\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iV_{j_i}^k=\\alpha_m D_xV_{k}^k+\\alpha_{\\ell}D_xV_{k+1}^k=\\alpha_m(3-k)+\\hat{\\alpha}(k-2)\\neq 0.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\t\\item \\textit{$k=2$ and $\\alpha_m(1-k)+\\hat{\\alpha} k= 0$:}\\\\\r\n\t\t\t\tIn this case we use the event $S_3$ where $\\eta_s(B^d(\\hat{x}_i,\\varepsilon/16))=1$ for $i\\in\\{1,2,3\\}$ with $\\hat{x}_j=x+\\frac{7\\varepsilon}{16}e_1+(-1)^j\\frac{7\\varepsilon}{16}e_2$ for $j\\in\\{1,2\\}$ and $\\hat{x}_3=x+\\frac{7\\varepsilon}{8}e_1$. Additionally, we assume $\\eta_s(B^d(x,3\\varepsilon)\\backslash ( \\bigcup_{i=1}^3B^d(\\hat{x}_i,\\varepsilon/16)))=0$ and $\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)$. Hence,\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iV_{j_i}^k=\\alpha_m D_xV_{k}^k=\\alpha_m\\neq 0.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\t\\item \\textit{$k=1$ and $\\alpha_m(1-k)+\\hat{\\alpha} k= 0$:}\\\\\r\n\t\t\t\tWe look at the event $S_4$ where \r\n\t\t\t\t$\\eta_s(B^d(\\hat{x}_1,\\varepsilon/4))=1$ for $\\hat{x}_1=x-\\frac{\\varepsilon}{4}e_1$, $\\eta_s(B^d(\\hat{x}_2,\\varepsilon/4))=2$ for $\\hat{x}_2=x+\\frac{3\\varepsilon}{4}e_1$, $\\eta_s(B^d(x,3\\varepsilon)\\backslash ( \\bigcup_{i=1}^2B^d(\\hat{x}_i,\\varepsilon/4)))=0$ and $\\eta_s(A_{3,\\varepsilon}(x,y))\\geq k$ for all $y\\in\\eta_s\\backslash B^d(x,3\\varepsilon)$. 
Since $\\hat{\\alpha}=0$, it follows\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\tD_x\\sum_{i=1}^n\\alpha_iV_{j_i}^k=2\\alpha_m\\neq 0.\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\\end{enumerate}\r\n\t\t\\begin{figure}[h]\r\n\t\t\t\\centering\r\n\t\t\t\\label{key}\r\n\t\t\t\t\\begin{tikzpicture}\r\n\t\t\t\t\\draw(-80pt,0pt)--(320pt,0pt);\r\n\t\t\t\t\\draw(-80pt,80pt)--(320pt,80pt);\r\n\t\t\t\t\\draw(-80pt,0pt)--(-80pt,80pt);\r\n\t\t\t\t\\draw(0pt,0pt)--(0pt,80pt);\r\n\t\t\t\t\\draw(80pt,0pt)--(80pt,80pt);\r\n\t\t\t\t\\draw(160pt,0pt)--(160pt,80pt);\r\n\t\t\t\t\\draw(240pt,0pt)--(240pt,80pt);\r\n\t\t\t\t\\draw(320pt,0pt)--(320pt,80pt);\r\n\t\t\t\t\\draw (-40pt,-10pt) node{Case 1};\r\n\t\t\t\t\\draw (40pt,-10pt) node{Case 2.1};\r\n\t\t\t\t\\draw (120pt,-10pt) node{Case 2.2};\r\n\t\t\t\t\\draw (200pt,-10pt) node{Case 2.3};\r\n\t\t\t\t\\draw (280pt,-10pt) node{Case 2.4};\r\n\t\t\t\n\t\t\t\t\\draw (-40pt,40pt) circle (36pt);\r\n\t\t\t\t\\filldraw[red] (-40pt,40pt) circle (1pt);\r\n\t\t\t\t\\draw[red] (-34.5pt,40pt) node{$x$};\r\n\t\t\t\t\\draw (-40pt,40pt) -- (-19.4pt,12.2pt);\r\n\t\t\t\t\\draw (-22pt,25pt) node{$\\varepsilon$};\r\n\t\t\t\t\\filldraw[green] (-36.7pt,30pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-67pt,50pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-62pt,28pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-20pt,62pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-45pt,65pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-53pt,55pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-13pt,45pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (-45pt,15pt) circle (1pt);\r\n\t\t\t\t\\draw[->](-70pt,10pt) -- (-60pt,20pt);\r\n\t\t\t\t\\draw[green] (-71pt,7pt) node{\\small $j_m$};\r\n\t\t\t\n\t\t\t\t\\draw (40pt,40pt) circle (36pt);\r\n\t\t\t\t\\draw (40pt,67pt) circle (9pt);\r\n\t\t\t\t\\filldraw[red] (40pt,40pt) circle (1pt);\r\n\t\t\t\t\\draw[red] (45.5pt,40pt) node{$x$};\r\n\t\t\t\t\\draw (40pt,40pt) -- (61.6pt,12.2pt);\r\n\t\t\t\t\\draw (58pt,25pt) node{$\\varepsilon$};\r\n\t\t\t\t\\filldraw[green] (44pt,72pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (36pt,70pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (41pt,63pt) circle (1pt);\r\n\t\t\t\t\\draw[->](26pt,52pt) -- (38pt,66pt);\r\n\t\t\t\t\\draw[green] (26pt,48pt) node{\\small $k+1$};\r\n\t\t\t\n\t\t\t\t\\draw (120pt,40pt) circle (36pt);\r\n\t\t\t\t\\draw (120pt,40pt) circle (9pt);\r\n\t\t\t\t\\draw (120pt,67pt) circle (2.25pt);\r\n\t\t\t\t\\draw (120pt,13pt) circle (2.25pt);\r\n\t\t\t\t\\draw (147pt,40pt) circle (2.25pt);\r\n\t\t\t\t\\draw (93pt,40pt) circle (2.25pt);\r\n\t\t\t\t\\filldraw[red] (120pt,40pt) circle (1pt);\r\n\t\t\t\t\\draw[red] (125.5pt,40pt) node{$x$};\r\n\t\t\t\t\\draw (120pt,40pt) -- (141.6pt,12.2pt);\r\n\t\t\t\t\\draw (138pt,25pt) node{$\\varepsilon$};\r\n\t\t\t\t\\filldraw[green] (120pt,67pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (120pt,13pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (93pt,40pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (147pt,40pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (119pt,43pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (116pt,39pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (120pt,36pt) circle (1pt);\r\n\t\t\t\t\\draw[->](104pt,53pt) -- (116pt,41pt);\r\n\t\t\t\t\\draw[green] (104pt,55pt) node{\\small $k-3$};\r\n\t\t\t\n\t\t\t\t\\draw (200pt,40pt) circle (36pt);\r\n\t\t\t\t\\draw (215.75pt,55.75pt) circle (2.25pt);\r\n\t\t\t\t\\draw (200pt,71.5pt) circle (2.25pt);\r\n\t\t\t\t\\draw (184.25pt,55.75pt) circle (2.25pt);\r\n\t\t\t\t\\filldraw[red] (200pt,40pt) circle (1pt);\r\n\t\t\t\t\\draw[red] 
(205.5pt,40pt) node{$x$};\r\n\t\t\t\t\\draw (200pt,40pt) -- (221.6pt,12.2pt);\r\n\t\t\t\t\\draw (218pt,25pt) node{$\\varepsilon$};\r\n\t\t\t\t\\filldraw[green] (215.75pt,55.75pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (200pt,71.5pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (184.25pt,55.75pt) circle (1pt);\r\n\t\t\t\n\t\t\t\t\\draw (280pt,40pt) circle (36pt);\r\n\t\t\t\t\\draw (280pt,31pt) circle (9pt);\r\n\t\t\t\t\\draw (280pt,67pt) circle (9pt);\r\n\t\t\t\t\\filldraw[red] (280pt,40pt) circle (1pt);\r\n\t\t\t\t\\draw[red] (285.5pt,40pt) node{$x$};\r\n\t\t\t\t\\draw (280pt,40pt) -- (301.6pt,12.2pt);\r\n\t\t\t\t\\draw (298pt,25pt) node{$\\varepsilon$};\r\n\t\t\t\t\\filldraw[green] (278pt,71pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (280pt,31pt) circle (1pt);\r\n\t\t\t\t\\filldraw[green] (283pt,64pt) circle (1pt);\r\n\t\t\t\\end{tikzpicture}\r\n\t\t\\caption{Configurations in $B^d(x,\\varepsilon)$}\\label{fig:kNN}\r\n\t\t\\end{figure}\r\n\t\t\tLet $\\varepsilon=\\bar{c} s^{-1/d}$ for $\\bar{c}>1$ such that $c_1e^{-sc_2\\varepsilon^d}\\leq\\frac{1}{2}$. Then, analogously to Case 1, we get $\\mathbb{P}(S_u)\\geq c_{\\alpha,k,d}$ for a constant $c_{\\alpha,k,d}>0$ and $u\\in\\{1,\\dots,4\\}$. Moreover, let\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tc_\\alpha=\\begin{cases}\r\n\t\t\t\t\t\\alpha_m(1-k)+\\hat{\\alpha} k, &\\text{ for }k\\in \\mathbb{N} \\text{ and }\\alpha_m(1-k)+\\hat{\\alpha} k\\neq 0,\\\\\r\n\t\t\t\t\t\\alpha_m(3-k)+\\hat{\\alpha}(k-2), &\\text{ for }k\\geq 3\\text{ and }\\alpha_m(1-k)+\\hat{\\alpha} k=0, \\\\\r\n\t\t\t\t\t\\alpha_m, &\\text{ for }k=2 \\text{ and }\\alpha_m(1-k)+\\hat{\\alpha} k=0,\\\\\r\n\t\t\t\t\t2\\alpha_{m}, &\\text{ for }k=1 \\text{ and }\\alpha_m(1-k)+\\hat{\\alpha} k=0.\r\n\t\t\t\t\\end{cases}\r\n\t\t\t\\end{align*}\r\n\t\t\tThen, for $A_s=\\{x\\in W:B^d(x,8\\varepsilon)\\subset W\\}$ and $s$ large enough such that $\\lambda_d(A_s)>\\frac{\\lambda_d(W)}{2}$ it follows for $u\\in\\{1,\\dots,4\\}$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\left[\\int_W \\left(D_x\\sum_{i=1}^{n}\\alpha_iV_{j_i}^{k}\\right)^2\\;\\mathrm{d}\\lambda(x)\\right]\\geq c_\\alpha^2\\int_{A_s} \\mathbb{P}(S_u)\\;\\mathrm{d}\\lambda(x)\r\n\t\t\t\t\\geq c_{\\alpha,k,W,d} s\r\n\t\t\t\\end{align*}\r\n\t\tfor a suitable constant $c_{\\alpha,k,W,d}>0$.\r\n\t\t\r\n\t\tOur functionals can be written as sums of scores as in \\eqref{eq:sum_of_scores}. For $y\\in \\eta_s$, $j\\in\\{k,\\dots,k_{\\mathrm{max}}\\}$ and $s\\geq 1$ the corresponding score is given by\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\xi_s(y,\\eta_s)=\\mathbbm{1}\\{\\mathrm{deg}(y,\\eta_s)=j\\}.\r\n\t\t\t\\end{align*}\r\n\t\t\tThe scores $(\\xi_s)_{s\\geq 1}$ clearly fulfil a $(4+p)$-th moment condition and are exponentially stabilising\r\n\t\t\tby \\cite[proof of Theorem 3.3]{SY21}. Therefore, we can apply Lemma \\ref{lemma:second_difference_operator}, which, together with Theorem \\ref{thm:varbound}, completes the proof.\r\n\t\t\\end{proof}\r\n\t\t\r\n\t\t\\begin{rema}\\label{rem:inhomogeneous_spatial_random_graphs}\r\nThroughout this section we assume that the underlying Poisson processes have the intensity measures $s\\lambda_d|_W$ for $s\\geq1$. However, we can generalise our results from these homogeneous Poisson processes to a large class of inhomogeneous Poisson processes. Let $\\mu$ be a measure with a density $g: W\\to[0,\\infty)$ such that $\\underline{c}\\leq g(x)\\leq\\overline{c}$ for all $x\\in W$ and constants $\\underline{c},\\overline{c}>0$. 
All results of this section continue to hold for Poisson processes with intensity measures $s\\mu$ for $s\\geq 1$. We only have to slightly modify the proofs by bounding the intensity measure by $s \\underline{c}\\lambda_d|_W$ from below or by $s \\overline{c}\\lambda_d|_W$ from above depending on whether a lower or an upper bound is required in our estimates. Consequently, some of the constants might change.\r\n\\end{rema}\r\n\t\t\r\n\t\t\\section{Random Polytopes}\\label{sec:random_polytopes}\r\n\t\tThe study of the convex hull of random points started with the works \\cite{RS63} and \\cite{RS64}. In \\cite{R05} central limit theorems for the volume and number of $k$-faces as well as variance bounds were shown. Variance asymptotics and central limit theorems for all intrinsic volumes of the convex hull in a ball were derived in \\cite{CSY13}. In \\cite{LSY19} the rates of convergence for the central limit theorems were further improved.\r\n\t\t\r\n\t\tThe $L^p$ surface area measure for a convex body was introduced in \\cite{L93}, where the $L^p$ Minkowski problem was described. The Minkowski problem asks for conditions for a Borel measure on the sphere under which this measure is the $L^p$ surface area of a convex body. The discrete $L^p$ Minkowski problem is obtained in the special case, where this convex body is a polytope. This situation can, for example, be found in \\cite{HLYZ05} and the references therein. In \\cite{HLRT22} the expected $L^p$ surface area of random polytopes was considered as a special case of $T$-functionals of random polytopes.\r\n\t\t\r\n\t\tIn this section the two-dimensional vector of $L^p$ surface areas of a random polytope for different $p_1,p_2\\in[0,1]$ is considered and lower variance bounds for linear combinations as well as a result on the multivariate normal approximation are derived.\r\n\t\tFor $s\\geq 1$ let $\\eta_s$ be a homogeneous Poisson process on $B^d(0,1)$ with intensity $s$, i.e.\\ a Poisson process on $\\mathbb{R}^d$ with intensity measure $\\lambda=s\\lambda_d|_{B^d(0,1)}$, where $\\lambda_d|_{B^d(0,1)}$ denotes the restriction of the Lebesgue measure to $B^d(0,1)$. \t\r\n\t\tWe consider the random polytope $Q$ generated by $\\eta_s\\cup\\{0\\}$, i.e. $Q$ is the convex hull $\\mathrm{Conv}(\\eta_s\\cup\\{0\\})$. For $p\\in[0,1]$ its $L^p$ surface area is given by \r\n\t\t\\begin{align}\\label{def l^p surface area}\r\n\t\t\tA_p = A_p(Q)=\\sum_{\\text{$F$ facet of $Q$}} \\mathrm{dist}(0,F)^{1-p}\\lambda_{d-1}(F),\r\n\t\t\\end{align}\r\n\t\twhere $\\mathrm{dist}(0,F)$ stands for the distance of $F$ to the origin $0$ (see for instance \\cite[Section 1]{HLRT22}). \r\n\t\t\\begin{theorem}\r\n\t\t\t\\label{theorem:randompoly_var}\r\n\t\t\tThe asymptotic covariance matrix of the vector $s^{(d+3)/(2(d+1))}(A_{p_1},A_{p_2})$ for $p_1,p_2\\in [0,1]$ with $p_1\\neq p_2$ is positive definite, i.e.\\ for any $\\alpha=(\\alpha_1,\\alpha_2)\\in\\mathbb{R}^2\\backslash\\{0\\}$ there exists a constant $c>0$ such that for $s$ sufficiently large\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{Var}[\\alpha_1A_{p_1}+\\alpha_2A_{p_2}]\\geq cs^{-(d+3)/(d+1)}.\r\n\t\t\t\\end{align*}\r\n\t\t\\end{theorem}\r\n\t\r\n\t\t\r\n\t\tNote that we add the origin as an extra point to the Poisson process mainly for technical reasons to ensure a useful definition of the $L^p$ surface area. However, since we are in this section only interested in asymptotic statements for $s\\to\\infty$, this does not make a difference. 
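\r\n\t\t\r\n\t\tTo illustrate the definition \\eqref{def l^p surface area} at the two endpoints of the parameter range: for $p=1$ it reduces to the usual surface area, $A_1(Q)=\\sum_{\\text{$F$ facet of $Q$}}\\lambda_{d-1}(F)=\\lambda_{d-1}(\\partial Q)$, and for $p=0$, decomposing $Q$ into the cones with apex $0$ over its facets gives\r\n\t\t\\begin{align*}\r\n\t\t\tA_0(Q)=\\sum_{\\text{$F$ facet of $Q$}}\\mathrm{dist}(0,F)\\lambda_{d-1}(F)=d\\lambda_d(Q).\r\n\t\t\\end{align*}\r\n\t\tHence, the choice $p_1=0$, $p_2=1$ in Theorem \\ref{theorem:randompoly_var} covers the joint behaviour of the volume and the surface area of the random polytope mentioned in the introduction.\r\n\t\t\r\n\t\t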
Let $\\widetilde{Q}$ denote the random polytope that is generated by $\\eta_s$, i.e.\\ $\\widetilde{Q}=\\mathrm{Conv}(\\eta_s)$, and let $A_p(\\widetilde{Q})$ be defined by the right-hand side of \\eqref{def l^p surface area}, which is also well-defined if the origin does not belong to the polytope. Since one can choose $m$ disjoint sets $U_1,\\dots, U_m\\subset B^d(0,1)$ for some $m\\in\\mathbb{N}$ with $\\lambda_d(U_i)>0$, $i\\in\\{1,\\dots,m\\}$, such that $0\\in\\mathrm{Conv}(\\xi)$ for all $\\xi\\in\\mathbf{N}$ with $\\xi\\cap U_i\\neq\\emptyset$ for all $i\\in\\{1,\\dots,m\\}$, we have\r\n\t\t\\begin{align}\r\n\t\t\t\\p(A_p(Q)\\neq A_p(\\widetilde{Q})) &\\leq\\p(0\\notin\\mathrm{Conv}(\\eta_s))\\leq1- \\p(\\eta_s(U_i) \\geq 1 \\text{ for } i=1,\\dots,m) \\nonumber\\\\\r\n\t\t\t&=1-\\prod_{i=1}^m(1-e^{-s\\lambda_d(U_i)}) \\leq c_{1,q}e^{-c_{2,q}s} \\label{eqn:probability_approximation}\r\n\t\t\\end{align}\r\n\t\tfor $s\\geq1$ with suitable constants $c_{1,q},c_{2,q}>0$.\r\n\t\t Therefore, the triangle inequality and the estimate $\\lvert A_p(Q)-A_p(\\widetilde{Q})\\rvert\\leq 2\\kappa_{d}$ provide\r\n\t\t \\begin{align}\r\n\t\t \t\\lvert\\mathrm{Var}[A_p(Q)]^{1/2}-\\Var[A_p(\\widetilde{Q})]^{1/2}\\rvert^2&\\leq \\Var[A_p(Q)-A_p(\\widetilde{Q})]\\leq \t\\E[(A_p(Q)-A_p(\\widetilde{Q}))^2] \\nonumber\\\\&\r\n\t\t \t\\leq (2\\kappa_{d})^2c_{1,q}e^{-c_{2,q}s}. \\label{eqn:variance_approximation}\r\n\t\t \\end{align}\r\n\t\tand similarly\r\n\t\t\\begin{equation}\\label{eqn:expectation_approximation}\r\n\t\t\\lvert\\mathbb{E}[A_p(Q)]-\\mathbb{E}[A_p(\\widetilde{Q})]\\rvert \\leq 2\\kappa_{d} c_{1,q}e^{-c_{2,q}s}.\r\n\t\t\\end{equation}\r\n\t\tThus, we consider $A_p(\\widetilde{Q})$ instead of $A_p(Q)$ throughout this section and, especially, in the proof of Theorem \\ref{theorem:randompoly_var}.\r\n\t\r\n\t\tWe work in the general framework described in Appendix \\ref{appendix:stabilising_functionals} with the underlying space $\\mathbb{X}=B^d(0,1)$ and the metric\r\n\t\t\\begin{align*}\r\n\t\t\td_{\\max}(x,y)=\\max{\\{\\lVert x-y\\rVert,\\sqrt{\\lvert \\lVert x\\rVert-\\lVert y\\rVert \\lvert}\\}}\r\n\t\t\\end{align*}\r\n\t\tfor $x,y\\in B^d(0,1)$. \r\n\t\tTo prove condition \\eqref{condition}, we start with writing the difference of the surface area of the ball $B^d(0,1)$ and the $L^p$ surface area of the random polytope $\\widetilde{Q}$ as a sum of scores. The following arguments are mostly analogously to \\cite[Section 3.4]{LSY19}, where similar representations for intrinsic volumes were derived. Especially, because the surface area is twice the $(d-1)$-st intrinsic volume, it was shown in \\cite[Lemma 3.8]{LSY19} that\r\n\t\t\\begin{align*}\r\n\t\t\ts(\\lambda_{d-1}(\\partial B^d(0,1))-\\lambda_{d-1}(\\partial \\widetilde{Q}))= 2 \\sum_{x\\in\\eta_s}\\xi_{d-1,s}(x,\\eta_{s})\r\n\t\t\\end{align*}\r\n\t\twith the scores $\\xi_{d-1,s}$ as in \\cite[last display on p.\\ 960]{LSY19} for $s\\geq 1$ and where $\\partial A$ denotes the boundary of a set $A\\subseteq B^d(0,1)$.\r\n\t\tWe consider analogous scores $\\xi_s$ for the $L^p$ surface area, i.e.\\\r\n\t\t\\begin{align*}\r\n\t\t\t\\xi_s(x,\\eta_s)=2\\xi_{d-1,s}(x,\\eta_s)+\\frac{s}{d}\\sum_{F\\in\\mathcal{F}:x\\in F}(1-\\mathrm{dist}(0,F)^{1-p})\\lambda_{d-1}(F)\r\n\t\t\\end{align*}\r\n\t\tfor $x\\in\\eta_s$, where $\\mathcal{F}$ denotes the set of all facets of $\\widetilde{Q}$. 
Therefore, we have\r\n\t\t\\begin{align*}\r\n\t\t\t&\\sum_{x\\in\\eta_s}\\xi_s(x,\\eta_s)\r\n\t\t\t=\\sum_{x\\in\\eta_s}\\Big( 2 \\xi_{d-1,s}(x,\\eta_s)+\\frac{s}{d}\\sum_{F\\in\\mathcal{F}:x\\in F}(1-\\mathrm{dist}(0,F)^{1-p})\\lambda_{d-1}(F)\\Big)\\\\\r\n\t\t\t&=2\\sum_{x\\in\\eta_s}\\xi_{d-1,s}(x,\\eta_s)+\\sum_{x\\in\\eta_s}\\frac{s}{d}\\sum_{F\\in\\mathcal{F}:x\\in F}(1-\\mathrm{dist}(0,F)^{1-p})\\lambda_{d-1}(F)\\\\&=s\\lambda_{d-1}(\\partial B^d(0,1))-s\\lambda_{d-1}(\\partial \\widetilde{Q})+s\\lambda_{d-1}(\\partial\\widetilde{Q})-sA_p(\\widetilde{Q})\\\\\r\n\t\t\t&=s(\\lambda_{d-1}(\\partial B^d(0,1))-A_p(\\widetilde{Q})).\r\n\t\t\\end{align*}\r\n\t\t\t\tFix $\\rho_0\\in(0,\\frac{1}{4})$ and let $B_{-\\rho_0}=B^d(0,1)\\backslash B^d(0,1-\\rho_0)$. In the following Lemma \\ref{lemma_scores} and the proof of Theorem \\ref{theorem:randompoly_var} we consider slightly modified scores, which are defined by\r\n\t\t\\begin{align}\r\n\t\t\t\\label{eq:modified_scores}\r\n\t\t\t\\tilde{\\xi}_s(x,\\eta_{s})=\\mathbbm{1}\\{x\\in B_{-\\rho_0}\\}\\xi_s(x,(\\eta_s\\cap B_{-\\rho_0})\\cup\\{0\\})\r\n\t\t\\end{align}\r\n\t\tfor $x\\in\\eta_s$, $s\\geq 1$, and\r\n\t\t\\begin{align*}\r\n\t\t\t\\widetilde{A}_p=\\sum_{x\\in\\eta_s}\\tilde{\\xi}_s(x,\\eta_{s}).\r\n\t\t\\end{align*}\r\n\t\tWe establish that the scores $\\tilde{\\xi}_s$ have some crucial properties. For exact definitions we refer to Appendix \\ref{appendix:stabilising_functionals}.\r\n\t\t\\begin{lemma}\r\n\t\t\t\\label{lemma_scores}\r\n\t\t\tThe scores $\\tilde{\\xi}_s$ are exponentially stabilising with $\\alpha_{stab}=d+1$, decay exponentially fast with the distance to the boundary $\\partial B^d(0,1)$ with $\\alpha_K=d+1$ and fulfil a $q$-th moment condition for $q\\geq 1$.\r\n\t\t\\end{lemma}\r\n\t\t\\begin{proof}\t\t\r\n\t\t\tAnalogously to \\cite[Lemma 3.10, Lemma 3.11 and Lemma 3.12]{LSY19} one can show that the scores are exponentially stabilising and decay exponentially fast with the distance to the boundary $\\partial B^d(0,1)$.\r\n\t\t\t\r\n\t\t\tLet $R(x,\\eta_s\\cup\\{x\\})$ denote the corresponding radius of stabilisation with respect to the $d_\\mathrm{max}$- distance that is derived in \\cite[p.\\ 963]{LSY19} and\r\n\t\t\tlet $\\tilde{\\xi}_{d-1,s}$ denote the slightly adjusted version of the score $\\xi_{d-1,s}$, which is defined as $\\xi_s$ in \\eqref{eq:modified_scores}.\r\n\t\t\t\r\n\t\t\tIn order to show a $q$-th moment condition for $p\\in[0,1]$ we use that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\bigcup_{F\\in\\mathcal{F}:x\\in F}F\\subseteq B_{\\mathrm{max}}^d\\left(x,R(x,\\eta_s\\cup\\{x\\})\\right)\\subseteq B^d\\left(x,R(x,\\eta_s\\cup\\{x\\})\\right),\r\n\t\t\t\\end{align*}\r\n\t\t\twhere $B_{\\mathrm{max}}^d$ denotes the ball with respect to the $d_\\mathrm{max}$-distance. Recall that $\\mathcal{F}$ stands for the set of all facets of the random polytope. Hence, due to monotonicity of the surface area of convex sets we have\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\label{surface_area_est}\r\n\t\t\t\t\\sum_{F\\in\\mathcal{F}:x\\in F}\\lambda_{d-1}(F)\\leq d\\kappa_d R(x,\\eta_s\\cup\\{x\\})^{d-1}.\r\n\t\t\t\\end{align}\r\n\t\t\tLet $\\widetilde{H}$ be the hyperplane through $\\partial B^d\\big(x,R(x,\\eta_s\\cup\\{x\\})\\big)\\cap \\partial B^d(0,1)$. By the definition of the radius of stabilisation in \\cite[p.\\ 963]{LSY19}, we know that for each vertex $x$ of the random polytope with $R(x,\\eta_s\\cup\\{x\\})\\leq 1$, $[0,x]$ intersects $\\widetilde{H}$, where $[0,x]$ denotes the line connecting $0$ and $x$. 
Moreover, we get with \\cite[p.\\ 963]{LSY19} that for a vertex $x$ the distance of the origin to a facet that contains $x$ is at least as large as the distance from the origin to the hyperplane $\\widetilde{H}$. Hence, for a facet $F$ that contains $x$ we have\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\label{radius_est}\r\n\t\t\t\t\\mathrm{dist}(0,F)\\geq \\mathrm{dist}(0,\\widetilde{H})\\geq\\sqrt{1-R(x,\\eta_s\\cup\\{x\\})^2}\\geq 1-R(x,\\eta_s\\cup\\{x\\})^2\r\n\t\t\t\\end{align}\r\n\t\t\tsince the radius of the $(d-1)$-dimensional ball $\\widetilde{H}\\cap B^d(0,1)$ can be bounded from above by $R(x,\\eta_s\\cup\\{x\\})$. The bound in \\eqref{radius_est} is obviously also true for $R(x,\\eta_s\\cup\\{x\\})>1$.\r\n\t\t\t\r\n\t\t\tSince $\\mathrm{dist}(0,F)\\leq 1$, it holds that $\\mathrm{dist}(0,F)^{1-p}\\geq \\mathrm{dist}(0,F)$ for $p\\in[0,1]$ and thus with \\eqref{surface_area_est} and \\eqref{radius_est} we have for $x\\in B_{-\\rho_0}$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lvert \\tilde{\\xi}_s(x,\\eta_s)\\rvert&=\\Big\\lvert2\\tilde{\\xi}_{d-1,s}(x,\\eta_s)+\\frac{s}{d}\\sum_{F\\in\\mathcal{F}:x\\in F}(1-\\mathrm{dist}(0,F)^{1-p})\\lambda_{d-1}(F)\\Big\\rvert \\\\\r\n\t\t\t\t&\\leq 2 \\lvert\\tilde{\\xi}_{d-1,s}(x,\\eta_s)\\rvert+\\frac{s}{d}\\sum_{F\\in\\mathcal{F}:x\\in F}\\lvert (1-\\mathrm{dist}(0,F))\\rvert \\lambda_{d-1}(F)\\\\\r\n\t\t\t\t&\\leq 2 \\lvert\\tilde{\\xi}_{d-1,s}(x,\\eta_s)\\rvert+\\frac{s}{d}R(x,\\eta_s\\cup\\{x\\})^2\\sum_{F\\in\\mathcal{F}:x\\in F} \\lambda_{d-1}(F)\\\\\r\n\t\t\t\t&\\leq 2 \\lvert\\tilde{\\xi}_{d-1,s}(x,\\eta_s)\\rvert+\\kappa_dsR(x,\\eta_s\\cup\\{x\\})^{d+1}.\r\n\t\t\t\\end{align*}\r\n\t\t\tCombining this with the fact from \\cite[Lemma 3.11]{LSY19} that there exist constants $C_{stab},c_{stab}>0$ such that\r\n\t\t\t$$\r\n\t\t\t\\p(R(x,\\eta_s\\cup\\{x\\})\\geq r)\\leq C_{stab} \\exp[-c_{stab} s r^{d+1}]\r\n\t\t\t$$\r\n\t\t\tfor $x\\in B^d(0,1)$, $r\\geq0$ and $s\\geq 1$, and with \\cite[Lemma 3.13]{LSY19}, which says that the scores $\\tilde{\\xi}_{d-1,s}$ fulfil a $q$-th moment condition, provides the $q$-th moment condition for $\\tilde{\\xi}_s$.\r\n\t\t\\end{proof}\r\n\t\t\r\nCombining Lemma \\ref{lemma_scores} with the arguments from the proof of \\cite[Lemma 3.9]{LSY19}, we derive that there exist constants $\\bar{C}_p,\\bar{c}_p>0$ such that\r\n$$\r\n\\max\\big\\{ \\p(s A_p(\\widetilde{Q})\\neq \\widetilde{A}_p), |\\mathbb{E}[s A_p(\\widetilde{Q})] - \\mathbb{E}[\\widetilde{A}_p]|, |\\mathrm{Var}[s A_p(\\widetilde{Q})] - \\mathrm{Var}[\\widetilde{A}_p]| \\big\\} \\leq \\bar{C}_p \\exp[-\\bar{c}_p s]\r\n$$\r\nfor $s\\geq 1$. Together with \\eqref{eqn:probability_approximation}, \\eqref{eqn:variance_approximation} and \\eqref{eqn:expectation_approximation} we obtain\r\n\\begin{equation}\\label{eqn:approximation_A_p}\r\n\\max\\big\\{\\p(s A_p\\neq \\widetilde{A}_p), |\\mathbb{E}[s A_p] - \\mathbb{E}[\\widetilde{A}_p]|, |\\mathrm{Var}[s A_p] - \\mathrm{Var}[\\widetilde{A}_p]| \\big\\} \\leq \\hat{C}_p \\exp[-\\hat{c}_p s]\r\n\\end{equation}\r\nfor $s\\geq 1$ with constants $\\hat{C}_p,\\hat{c}_p>0$.\r\n\t\t\r\n\t\tLet $S(y^{(1)},\\dots,y^{(m)})$ denote the simplex with vertices $y^{(1)},\\dots,y^{(m)}$ for $m\\in\\{1,\\dots,d+1\\}$.\r\n\t\tFor the proof of Theorem \\ref{theorem:randompoly_var} we need to know how the $L^p$ surface area of a polytope changes if we add a simplex on one of its facets. 
Let this $d$-dimensional simplex be given by $S(z^{(1)},\\dots,z^{(d+1)})$ for points $z^{(1)},\\dots,z^{(d+1)}\\in B^d(0,1)$, where $z^{(d+1)}$ denotes the point that is added and $S(z^{(1)},\\dots,z^{(d)})$ is the original facet of the polytope. The facets of the simplex are given by $F_i=S(z^{(1)},\\dots,z^{(i-1)},z^{(i+1)},\\dots,z^{(d+1)})$ and the distance of a facet to the origin is denoted by $\\rho_i=\\mathrm{dist}(F_i,0)$ for $i\\in\\{1,\\dots,d+1\\}$. We are interested in \r\n\t\t\\begin{align}\\label{def_Delta}\r\n\t\t\t\\Delta_{p}=\\sum_{i=1}^{d}\\rho_i^{1-p}\\lambda_{d-1}(F_i)-\\rho_{d+1}^{1-p}\\lambda_{d-1}(F_{d+1}),\r\n\t\t\\end{align}\r\n\t\twhich is the change of the $L^p$ surface area after adding the simplex.\r\n\t\t\r\n\t\tIn the following we also use the notation $\\bar{h}=\\mathrm{dist}(z^{(d+1)},F_{d+1})$ for the height of the added simplex, $T_i=S(z^{(1)},\\dots,z^{(i-1)},z^{(i+1)},\\dots,z^{(d)})$ for the $(d-2)$-dimensional faces of the base of the simplex and $h_i=\\mathrm{dist}(\\bar{z}_{d+1},T_i)$ for $i\\in\\{1,\\dots,d\\}$, where $\\bar{z}_{d+1}$ is the projection of $z^{(d+1)}$ to $F_{d+1}$.\r\n\t\tThe behaviour of $\\Delta_{p}$ is described in the following geometric lemma.\r\n\t\t\\begin{lemma}\\label{lemma_geometry}\r\n\t\t\tLet $z^{(1)},\\dots,z^{(d+1)}\\in B^d(0,1)$.\r\n\t\tFor a simplex $S(z^{(1)},\\dots,z^{(d+1)})$, whose vertices are chosen in such a way that $\\arg\\min_{i=1,\\dots,d+1}\\rho_i=d+1$, we have\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\Big\\lvert \\Delta_{p}-\\frac{1}{d-1}\\sum_{i=1}^d\\lambda_{d-2}(T_i)\\Big(\\sqrt{h_i^2+\\bar{h}^2}-h_i\\Big)\\Big\\rvert\\leq\\rho_{d+1}^{-p}(1-\\rho_{d+1})\\sum_{i=1}^{d+1}\\lambda_{d-1}(F_i)\r\n\t\t\t\\end{align*}\r\n\t\tfor $p\\in[0,1]$ and\r\n\t\t\\begin{align*}\r\n\t\t\t\\Big\\lvert &\\Delta_{p_1}-\\Delta_{p_2}-\\sum_{i=1}^{d}(p_2-p_1)(\\rho_i-\\rho_{d+1})\\lambda_{d-1}(F_i)\\Big\\rvert\\\\&\\leq 2 \\rho_{d+1}^{-p_2-1} (1-\\rho_{d+1})^2\\sum_{i=1}^d\\lambda_{d-1}(F_i)+\\rho_{d+1}^{-p_2}(1-\\rho_{d+1})\\sum_{i=1}^d\\lambda_{d-2}(T_i)\\Big(\\sqrt{h_i^2+\\bar{h}^2}-h_i\\Big)\r\n\t\t\\end{align*}\r\n\t\tfor $p_1,p_2\\in[0,1]$ with $p_1<p_2$.\r\n\t\t\\end{lemma}\r\n\t\t\\begin{proof}[Proof of Theorem \\ref{theorem:randompoly_var}]\r\n\t\t\tLet $a>0$ be fixed. Throughout the proof we choose $s\\geq 1$ depending on $a$ large enough such that several conditions hold.\r\n\t\t\tRecall that $e_i$ denotes the standard unit vector in the $i$-th direction and define $x^{(d+1)}=(1-as^{-2/(d+1)})e_1$. Let $x^{(1)},\\dots, x^{(d)}\\in B^d(0,1)$ be points on the hyperplane\r\n\t\t\t$$\r\n\t\t\tH=\\{y=(y_1,\\dots,y_d)\\in\\mathbb{R}^d:y_1=1-(a+a^2)s^{-2/(d+1)}\\}\r\n\t\t\t$$\r\n\t\t\twith pairwise distance $2\\ell=2\\sqrt{a}s^{-1/(d+1)}$ that form a regular $(d-1)$-dimensional simplex $S$ such that all points have the same distance to $x^{(d+1)}$. Then, $x^{(1)},\\dots, x^{(d+1)}$ are the vertices of a $d$-dimensional simplex with height $h=a^2s^{-2/(d+1)}$.\r\n\t\t\tFor a set $A\\subset B^d(0,1)$ and $x\\in B^d(0,1)\\backslash\\mathrm{int}(A)$ let \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\mathrm{Vis}(x,A)=\\{y\\in B^d(0,1):[y,x]\\cap \\mathrm{int}(A)=\\emptyset\\}\r\n\t\t\t\\end{align*} denote the visibility region at $x$, where $\\mathrm{int}(A)$ stands for the interior of $A$. Recall that $[y,x]$ denotes the line connecting $x$ and $y$. Let $\\varepsilon_h,\\varepsilon_\\ell\\in(0,1/4)$, which will be chosen sufficiently small such that several properties used throughout this proof are satisfied. 
Now, choose $d$ cuboids $C_1^x,\\dots, C_{d}^x\\subset \\mathrm{Vis}(x^{(d+1)},\\mathrm{Conv}(x^{(1)},\\dots,x^{(d+1)}))$ containing $x^{(1)},\\dots, x^{(d)}$ each with height $\\varepsilon_ha^2s^{-2/(d+1)}$ and such that its $(d-1)$-dimensional base is a cube of side length $\\varepsilon_\\ell \\sqrt{a}s^{-1/(d+1)}$ which is contained in the hyperplane $H$.\r\n\t\t\t\r\n\t\t\tIndeed, $\\varepsilon_h,\\varepsilon_\\ell\\in(0,1/4)$ can be chosen small enough such that $C_1^x,\\dots, C_{d}^x\\subset B^d(0,1)$ because by e.g.\\ \\cite[Section 6, p.\\ 367]{B92} the height $h_k$ of a $k$-dimensional regular simplex $S_k$ with edge length $2\\ell$ is given by\r\n\t\t\t\\begin{align}\\label{h(Sk)}\r\n\t\t\t\th_k(S_k)=\\frac{2\\ell}{\\sqrt{2}}\\sqrt{\\frac{k+1}{k}},\r\n\t\t\t\\end{align}\r\n\t\t\ti.e. for $y\\in C_i^x$ with $i\\in\\{1,\\dots, d\\}$ we have\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lVert y\\rVert^2&\\leq (1-(a+a^2-\\varepsilon_ha^2)s^{-2/(d+1)})^2+\\left(\\frac{d-1}{d}h_{d-1}(S_{d-1})+(d-1)\\varepsilon_\\ell\\sqrt{a} s^{-1/(d+1)}\\right)^2\\\\\r\n\t\t\t\t&=(1-(a+a^2-\\varepsilon_ha^2)s^{-2/(d+1)})^2+\\left(\\sqrt{\\frac{2(d-1)a}{d}}s^{-1/(d+1)}+(d-1)\\varepsilon_\\ell\\sqrt{a} s^{-1/(d+1)}\\right)^2\\\\\r\n\t\t\t\t&=1-\\Big[2\\Big(\\frac{a}{d}+a^2-\\varepsilon_ha^2\\Big)-2(d-1)\\sqrt{\\frac{2(d-1)}{d}}\\varepsilon_\\ell a-(d-1)^2\\varepsilon_\\ell^2a\\\\&\\quad\\quad\\quad\\quad-(a+a^2-\\varepsilon_ha^2)^2s^{-2/(d+1)}\\Big]s^{-2/(d+1)}<1\r\n\t\t\t\\end{align*}\r\n\t\t\tfor $\\varepsilon_h,\\varepsilon_\\ell\\in(0,1/4)$ small enough and $s$ sufficiently large.\r\n\t\t\t\\begin{figure}\r\n\t\t\t\t\\centering\r\n\t\t\t\t\r\n\t\t\t\t\\begin{tikzpicture}[decoration={brace,amplitude=2mm}]\r\n\t\t\t\t\tcentering\r\n\t\t\t\t\t\\draw[black] (0,0) -- (0:4) arc (0:180:4) -- cycle;\r\n\t\t\t\t\t\\filldraw[black] (0,0) circle (0.06);\r\n\t\t\t\t\t\\draw(0,-0.3) node{\\footnotesize$0$};\r\n\t\t\t\t\t\\filldraw[black] (3,1.5) circle (0.06);\r\n\t\t\t\t\t\\draw(3,1.2) node{\\footnotesize $x^{(2)}$};\r\n\t\t\t\t\t\\filldraw[black] (-3,1.5) circle (0.06);\r\n\t\t\t\t\t\\draw(-3,1.2) node{\\footnotesize$x^{(1)}$};\r\n\t\t\t\t\t\\filldraw[black] (0,3) circle (0.06);\r\n\t\t\t\t\t\\draw(0.1,3.4) node{\\footnotesize$x^{(d+1)}$};\r\n\t\t\t\t\t\\draw (-3,1.5)--(0,3);\r\n\t\t\t\t\t\\draw (0,3)--(3,1.5);\r\n\t\t\t\t\t\\draw (3,1.5)--(-3,1.5);\r\n\t\t\t\t\t\\draw[dashed] (-3,1.5)--(-4,1);\r\n\t\t\t\t\t\\draw[dashed] (3,1.5)--(4,1);\r\n\t\t\t\t\t\\draw[dashed](-4,1)--(4,1);\r\n\t\t\t\t\t\\draw[decorate] (-4.5,1)--(-4.5,4);\r\n\t\t\t\t\t\\draw(-5.75,2.5) node{$c_as^{-2/(d+1)}$};\r\n\t\t\t\t\t\\draw[decorate] (4.5,3)--(4.5,1.5);\r\n\t\t\t\t\t\\draw(5.75,2.25) node{$a^2s^{-2/(d+1)}$};\r\n\t\t\t\t\t\\draw[decorate] (4.5,4)--(4.5,3);\r\n\t\t\t\t\t\\draw(5.75,3.5) node{$as^{-2/(d+1)}$};\r\n\t\t\t\t\t\\draw[decorate] (0,0.75)--(-3,0.75);\r\n\t\t\t\t\t\\draw(-1.25,0.25) node{$\\sqrt{a}s^{-1/(d+1)}$};\r\n\t\t\t\t\t\\draw[blue](-3.5,1.75)--(-3.5,1.5);\r\n\t\t\t\t\t\\draw[blue](-3.5,1.75)--(-3,1.75);\r\n\t\t\t\t\t\\draw[blue](-3,1.75)--(-3,1.5);\r\n\t\t\t\t\t\\draw[blue](-3.5,1.5)--(-3,1.5);\r\n\t\t\t\t\t\\draw[blue, ->](-3.25,2.5)--(-3.25,1.8);\r\n\t\t\t\t\t\\draw[blue] (-3.25,2.8) node{$C_1^x$};\r\n\t\t\t\t\t\\draw[blue](3.5,1.75)--(3.5,1.5);\r\n\t\t\t\t\t\\draw[blue](3.5,1.75)--(3,1.75);\r\n\t\t\t\t\t\\draw[blue](3,1.75)--(3,1.5);\r\n\t\t\t\t\t\\draw[blue](3.5,1.5)--(3,1.5);\r\n\t\t\t\t\t\\draw[blue, ->](3.25,2.5)--(3.25,1.8);\r\n\t\t\t\t\t\\draw[blue] (3.25,2.8) node{$C_2^x$};\r\n\t\t\t\t\t\r\n\t\t\t\t\t\\filldraw[red] 
(3.4,1.6) circle (0.06);\r\n\t\t\t\t\t\\draw[red](4.1,1.6) node{\\footnotesize $z^{(2)}$};\r\n\t\t\t\t\t\\filldraw[red] (-3.2,1.7) circle (0.06);\r\n\t\t\t\t\t\\draw[red](-4,1.7) node{\\footnotesize$z^{(1)}$};\r\n\t\t\t\t\t\\filldraw[red] (0,2.75) circle (0.06);\r\n\t\t\t\t\t\\draw[red](0.1,2.4) node{\\footnotesize$z^{(d+1)}$};\r\n\t\t\t\t\t\\draw[red] (-3.2,1.7)--(0,2.75);\r\n\t\t\t\t\t\\draw[red] (0,2.75)--(3.4,1.6);\r\n\t\t\t\t\t\\draw[red] (3.4,1.6)--(-3.2,1.7);\r\n\t\t\t\t\t\\draw[decorate] (0.5,3)--(0.5,2.75);\r\n\t\t\t\t\t\\draw(1.7,2.875) node{$ta^2s^{-2/(d+1)}$};\r\n\t\t\t\t\\end{tikzpicture}\r\n\t\t\t\t\\caption{Construction in $B^d(0,1)$ for $d=2$}\r\n\t\t\t\t\\label{fig:construction}\r\n\t\t\t\\end{figure}\r\n\t\t\t\r\n\t\t\tIn the sequel, we use the same notation as in the context of Lemma \\ref{lemma_geometry}.\r\n\t\t\tWe consider the simplex $S(z^{(1)},\\dots,z^{(d+1)})$, where $z^{(i)}\\in C_i^x$ for $i\\in\\{1,\\dots,d\\}$ and $z^{(d+1)}=x^{(d+1)}-ta^2s^{-2/(d+1)}e_1$ for $t\\in[0,1/2]$ (see Figure \\ref{fig:construction}). Due to the choice of $C_i^x$ we have for $s$ sufficiently large and $t\\in[0,1/2]$,\r\n\t\t\t\\begin{align}\\label{rho_d+1_l}\r\n\t\t\t\t \\rho_{d+1}\\geq 1-(a+a^2)s^{-2/(d+1)}\r\n\t\t\t\\end{align}\r\n\t\t\tand\r\n\t\t\t\\begin{align}\\label{h_quer_l_und_u}\r\n\t\t\t\\frac{a^2}{4} s^{-2/(d+1)}\t\\leq \\Big(a^2-\\varepsilon_ha^2-\\frac{a^2}{2}\\Big)s^{-2/(d+1)}\\leq\\bar{h}\\leq a^2s^{-2/(d+1)},\r\n\t\t\t\\end{align}\r\n\t\t\twhere we used $\\varepsilon_h\\in(0,1/4)$.\r\n\t\t\t\r\n\t\t\tFor $i\\in\\{1,\\dots,d\\}$ we show in the following that we can control $h_i=\\mathrm{dist}(T_i,\\bar{z}_{d+1})$ with the choice of $\\varepsilon_h,\\varepsilon_\\ell$ uniformly for $s$ sufficiently large.\r\n\t\t\tDefine $\\widetilde{F}_{d+1}=S(x^{(1)},\\dots,x^{(d)})$ and let $\\bar{z}_{d+1}$ and $\\bar{x}_{d+1}$ denote the projections of $z^{(d+1)}$ to $F_{d+1}$ and $\\widetilde{F}_{d+1}$, respectively.\r\n\t\t\tMoreover, let $\\widetilde{T}_{i}=S(x^{(1)},\\dots,x^{(i-1)},x^{(i+1)},\\dots,x^{(d)})$. Then, for each $y\\in T_{i}$ there exists a $\\tilde{y}\\in \\widetilde{T}_{i}$ such that $\\lVert y-\\tilde{y}\\rVert\\leq (d-1)\\varepsilon_\\ell\\sqrt{a}s^{-1/(d+1)}+\\varepsilon_ha^2s^{-2/(d+1)}$. Hence, with \\eqref{h(Sk)},\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\sqrt{\\frac{2}{d(d-1)}}\\sqrt{a}s^{-1/(d+1)} & =\\mathrm{dist}(\\bar{x}_{d+1},\\widetilde{T}_i)\\\\\r\n\t\t\t\t&\\leq \\mathrm{dist}(\\bar{x}_{d+1},T_i)+(d-1)\\varepsilon_\\ell\\sqrt{a}s^{-1/(d+1)}+\\varepsilon_ha^2s^{-2/(d+1)}\\\\\r\n\t\t\t\t&\\leq \\lVert\\bar{x}_{d+1}-\\bar{z}_{d+1}\\rVert+h_i+(d-1)\\varepsilon_\\ell\\sqrt{a}s^{-1/(d+1)}+\\varepsilon_ha^2s^{-2/(d+1)}.\r\n\t\t\t\\end{align*}\r\n\t\t\tFor the distance of the projections we have\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\label{dist_xz}\r\n\t\t\t\t\\lVert \\bar{x}_{d+1}-\\bar{z}_{d+1}\\rVert\\leq \\lVert \\bar{x}_{d+1}-z^{(d+1)}\\rVert+\\lVert z^{(d+1)}-\\bar{z}_{d+1}\\rVert\\leq 2a^2s^{-2/(d+1)}.\r\n\t\t\t\\end{align}\r\n\t\t\tHence, we derive for $h_i$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\th_i&\\geq \\sqrt{\\frac{2}{d(d-1)}}\\sqrt{a}s^{-1/(d+1)}-2a^2s^{-2/(d+1)}-(d-1)\\varepsilon_\\ell\\sqrt{a}s^{-1/(d+1)}-\\varepsilon_ha^2s^{-2/(d+1)}.\r\n\t\t\t\\end{align*}\r\n\t\t\tNote that $2a^2 s^{-2/(d+1)}\\leq\\frac{1}{2} \\sqrt{\\frac{2}{d(d-1)}}\\sqrt{a} s^{-1/(d+1)}$ for $s$ sufficiently large. 
Therefore, we can choose $\\varepsilon_\\ell,\\varepsilon_h>0$ small enough such that for all $t\\in[0,1/2]$ and $s$ sufficiently large,\r\n\t\t\t\\begin{align}\\label{h_i_l}\r\n\t\t\t\th_i\\geq c_{h,l}\\sqrt{a}s^{-1/(d+1)}\r\n\t\t\t\\end{align}\r\n\t\t\twith a constant $c_{h,l}>0$. Here the constant $c_{h,l}$ does not depend on $a$, while the lower bound for $s$ such that the inequality holds may depend on $a$. The same applies to the inequalities and constants in the sequel if not stated otherwise.\tMoreover, using again \\eqref{dist_xz} as well as $\\varepsilon_h, \\varepsilon_\\ell \\leq 1/4$, we have\r\n\t\t\t\\begin{align}\r\n\t\t\t\th_i&\\leq \\lVert \\bar{x}_{d+1}-\\bar{z}_{d+1}\\rVert+\\mathrm{dist}(\\bar{x}_{d+1}, T_i)\\nonumber\\\\&\\leq \\lVert \\bar{x}_{d+1}-\\bar{z}_{d+1}\\rVert+\\mathrm{dist}(\\bar{x}_{d+1}, \\widetilde{T}_i) +(d-1)\\varepsilon_\\ell \\sqrt{a}s^{-1/(d+1)}+\\varepsilon_ha^2s^{-2/(d+1)}\\nonumber\\\\&\\leq \\frac{9}{4}a^2s^{-2/{(d+1)}}+ \\left(\\sqrt{\\frac{2}{d(d-1)}}+\\frac{d-1}{4}\\right) \\sqrt{a}s^{-1/(d+1)} \\nonumber\\\\ &\\leq c_{h,u}\\sqrt{a}s^{-1/(d+1)} \\label{h_i_u}\r\n\t\t\t\\end{align}\r\n\t\t\tfor a suitable constant $c_{h,u}>0$, $t\\in[0,1/2]$, and $s$ sufficiently large.\r\n\t\t\t\t\r\n\t\t\tBy e.g. \\cite[Section 6, p.\\ 367]{B92} the $k$-dimensional volume $\\lambda_k$ of a $k$-dimensional regular simplex $S_k$ with edge length $2\\ell$ is\r\n\t\t\t\\begin{align}\\label{lambda(Sk)}\r\n\t\t\t\t\\lambda_k(S_k)=\\frac{(2\\ell)^k}{k!}\\sqrt{\\frac{k+1}{2^k}}\r\n\t\t\t\\end{align}\r\n\t\t\tfor $k\\in\\mathbb{N}$.\r\n\t\t\tBy definition $\\widetilde{T}_{i}$ with $i\\in\\{1,\\dots,d\\}$ is a regular $(d-2)$-dimensional simplex of side length $2\\ell=2\\sqrt{a}s^{-1/{(d+1)}}$.\r\n\t\t\tWe know that the $(d-2)$-dimensional volume of a $(d-2)$-dimensional regular simplex of side length $2\\sqrt{a}$ in $\\mathbb{R}^d$ is continuous with regard to translations of the vertices. Therefore, we can choose a cube around each vertex small enough such that moving each vertex within the corresponding cube changes the $(d-2)$-dimensional volume of the $(d-2)$-dimensional simplex only slightly. Due to homogeneity we can transfer this result to a regular simplex of side length $2\\sqrt{a}s^{-1/{(d+1)}}$ for all $s\\geq 1$, where each side of the cubes is scaled by $s^{-1/{(d+1)}}$. 
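As a quick sanity check of \eqref{h(Sk)} and \eqref{lambda(Sk)} (an elementary aside), consider $k=2$, where a regular simplex with edge length $2\ell$ is an equilateral triangle. The formulas give
\begin{align*}
h_2(S_2)=\frac{2\ell}{\sqrt{2}}\sqrt{\frac{3}{2}}=\sqrt{3}\,\ell\qquad\text{and}\qquad \lambda_2(S_2)=\frac{(2\ell)^2}{2!}\sqrt{\frac{3}{4}}=\sqrt{3}\,\ell^2,
\end{align*}
which agree with the classical height $\frac{\sqrt{3}}{2}\cdot 2\ell$ and area $\frac{\sqrt{3}}{4}(2\ell)^2$ of an equilateral triangle with side length $2\ell$.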
Hence, we can choose $\\varepsilon_h,\\varepsilon_\\ell\\in(0,1/4)$ small enough such that with \\eqref{lambda(Sk)} for $s$ sufficiently large,\r\n\t\t\\begin{align}\r\n\t\t\t\\lambda_{d-2}(T_i)&\\geq \\frac{1}{2} \t\\lambda_{d-2}(S(x^{(1)},\\dots,x^{(i-1)},x^{(i+1)},\\dots x^{(d)}))=\\frac{2^{(d-2)/2}\\sqrt{d-1}}{2(d-2)!}(\\sqrt{a} s^{-1/(d+1)})^{d-2} \\nonumber\\\\&=: c_{T,l}a^{(d-2)/2}s^{-(d-2)/(d+1)}\\label{T_i_l}\r\n\t\t\\end{align}\r\n\t\tand\r\n\t\t\\begin{align}\r\n\t\t\t\\lambda_{d-2}(T_i)\\leq c_{T,u}a^{(d-2)/2}s^{-(d-2)/(d+1)}\\label{T_i_u}\r\n\t\t\\end{align}\r\n\t\tfor a suitable constant $c_{T,u}>0$.\r\n\t\tTogether with \\eqref{h_i_u}, it holds\r\n\t\t\\begin{align*}\r\n\t\t\t\\lambda_{d-1}(F_{d+1})&=\\frac{1}{d-1}\\sum_{i=1}^d\\lambda_{d-2}(T_i)h_i\\\\&\\leq \\frac{1}{d-1}\\sum_{i=1}^dc_{T,u}a^{(d-2)/2}s^{-(d-2)/(d+1)}c_{h,u}\\sqrt{a}s^{-1/(d+1)}\r\n\t\t\\end{align*}\r\n\t\tand with \\eqref{h_quer_l_und_u},\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lambda_{d-1}(F_i)&=\\frac{1}{d-1}\\lambda_{d-2}(T_i)\\sqrt{h_i^2+\\bar{h}^2}\\\\\r\n\t\t\t\t&\\leq \\frac{1}{d-1}c_{T,u}a^{(d-2)/2}s^{-(d-2)/(d+1)}\\sqrt{c_{h,u}^2as^{-2/(d+1)}+a^4s^{-4/(d+1)}}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor $i\\in\\{1,\\dots,d\\}$. Hence, we have for $j\\in\\{1,\\dots,d+1\\}$ and $s$ sufficiently large,\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\lambda_{d-1}(F_j)\\leq c_{F,u}a^{(d-1)/2}s^{-(d-1)/(d+1)}\\label{F_i_u}\r\n\t\t\t\\end{align}\r\n\t\t\t for a suitable constant $c_{F,u}>0$. Analogously, we have for $s$ sufficiently large,\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\label{F_i_l}\r\n\t\t\t\t\\lambda_{d-1}(F_j)&\\geq c_{F,l}a^{(d-1)/2}s^{-(d-1)/(d+1)}\r\n\t\t\t\\end{align}\r\n\t\t\tfor a suitable constant $c_{F,l}>0$ and $j\\in\\{1,\\dots,d+1\\}$.\r\n\t\t\tDue to the fundamental theorem of calculus we have for $x>y>0$,\r\n\t\t\t\\begin{align}\\label{FTC_l}\r\n\t\t\t\t\\sqrt{x^2+y^2}-x=\\int_0^{y^2}\\frac{1}{2\\sqrt{x^2+z}}\\;\\mathrm{d}z\\geq y^2\\frac{1}{2\\sqrt{x^2+y^2}}\\geq \\frac{y^2}{2\\sqrt{2}x}\r\n\t\t\t\\end{align}\r\n\t\t\tand \r\n\t\t\t\\begin{align}\\label{FTC_u}\r\n\t\t\t\t\\sqrt{x^2+y^2}-x\\leq \\frac{y^2}{2x}.\r\n\t\t\t\\end{align}\r\n\t\t\tWe can assume without loss of generality that $p_10$, $t\\in[0,1/2]$ and $s$ sufficiently large,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\alpha_1\\Delta_{p_1}+\\alpha_2\\Delta_{p_2}\\\\&\\geq \\frac{\\alpha_1+\\alpha_2}{d-1}\\sum_{i=1}^d c_{T,l}a^{(d-2)/2}s^{-(d-2)/(d+1)}\\frac{\\frac{a^4}{16}s^{-4/(d+1)}}{2\\sqrt{2}c_{h,u}a^{1/2}s^{-1/(d+1)}}\\\\&\\;\\;\\;\\;-(\\lvert\\alpha_1\\rvert+\\lvert\\alpha_2\\rvert)2^{p_2}(a+a^2)s^{-2/(d+1)}\\sum_{i=1}^{d+1}c_{F,u}a^{(d-1)/2}s^{-(d-1)/(d+1)}\\\\\r\n\t\t\t\t&\\geq \\tilde{c}_{d}a^{(d+5)/2}s^{-1}-\\tilde{c}_{d,p_1,p_2}(a^{(d+3)/2}+a^{(d+1)/2})s^{-1}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor suitable constants $\\tilde{c}_{d},\\tilde{c}_{d,p_1,p_2}>0$, where we used that $\\rho_{d+1}\\geq \\frac{1}{2}$ for $s$ sufficiently large. 
\r\n\t\t\tHence, we can fix $a>0$ large enough such that this estimation provides for $\\alpha_1\\neq-\\alpha_2$ the existence of a constant $\\tilde{c}_1>0$ for which\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\lvert\\alpha_1\\Delta_{p_1}+\\alpha_2\\Delta_{p_2}\\rvert\\geq\\tilde{c}_1a^{(d+5)/2}s^{-1}\\label{alpha1+alpha2neq0}\r\n\t\t\t\\end{align} \r\n\t\tfor $s$ sufficiently large and $t\\in[0,1/2]$.\r\n\r\n\t\t\t\t\t\t\\begin{figure}[htb]\r\n\t\t\t\t\\begin{minipage}[t]{.35\\linewidth}\r\n\t\t\t\t\t\\begin{tikzpicture}[decoration={brace,amplitude=2mm}, scale=0.8]\r\n\t\t\t\t\t\t\\vspace{100pt}\r\n\t\t\t\t\t\t\\filldraw[black] (0,-2) circle (0.06);\r\n\t\t\t\t\t\t\\draw(0,-2.3) node{\\footnotesize$0$};\r\n\t\t\t\t\t\t\\filldraw[black] (2,1.5) circle (0.06);\r\n\t\t\t\t\t\t\\filldraw[black] (-2,1.5) circle (0.06);\r\n\t\t\t\t\t\t\\draw(-2.5,1.5) node{\\footnotesize $\\widetilde{F}_{d+1}$};\r\n\t\t\t\t\t\t\\filldraw[red](0,3.5) circle (0.06);\r\n\t\t\t\t\t\t\\draw[red](0.1,3.8) node{\\footnotesize$z^{(d+1)}$};\r\n\t\t\t\t\t\t\\draw (2,1.5)--(-2,1.5);\r\n\t\t\t\t\t\t\\filldraw[red] (2.2,1.5) circle (0.06);\r\n\t\t\t\t\t\t\\filldraw[red] (-2.2,2.1) circle (0.06);\r\n\t\t\t\t\t\t\\draw[red](-2.8,2) node{\\footnotesize$F_{d+1}$};\r\n\t\t\t\t\t\t\\draw[red] (-2.2,2.1)--(2.2,1.5);\r\n\t\t\t\t\t\t\\draw[red] (0,3.5)--(2.2,1.5);\r\n\t\t\t\t\t\t\\draw[red] (-2.2,2.1)--(0,3.5);\r\n\t\t\t\t\t\t\\draw[dashed,blue](0,-2)--(0,3.5);\r\n\t\t\t\t\t\t\\filldraw[blue] (0,1.8)circle (0.06);\r\n\t\t\t\t\t\t\\draw[blue](0.25,1.95) node{\\footnotesize$\\bar{z}_{0}$};\r\n\t\t\t\t\t\t\\filldraw[blue] (0,1.5)circle (0.06);\r\n\t\t\t\t\t\t\\draw[blue](-0.4,1.25) node{\\footnotesize$\\bar{x}_{d+1}$};\r\n\t\t\t\t\t\t\\draw[dashed,blue](0,-2)--(0.5087,1.73);\r\n\t\t\t\t\t\t\\filldraw[blue](0.5087,1.73)circle (0.06);\r\n\t\t\t\t\t\t\\draw[blue](1,1.875) node{\\footnotesize$u_{d+1}$};\r\n\t\t\t\t\t\t\\draw[dashed,blue](0.5087,1.5)--(0.5087,1.73);\r\n\t\t\t\t\t\t\\filldraw[blue](0.5087,1.5)circle (0.06);\r\n\t\t\t\t\t\t\\draw[blue](0.7,1.3) node{\\footnotesize$\\bar{u}$};\r\n\t\t\t\t\t\t\\draw[dashed,blue](0,3.5)--(-0.228,1.831);\r\n\t\t\t\t\t\t\\filldraw[blue](-0.228,1.831) circle (0.06);\r\n\t\t\t\t\t\t\\draw[blue](-0.6,2.1) node{\\footnotesize$\\bar{z}_{d+1}$};\r\n\t\t\t\t\t\\end{tikzpicture}\r\n\t\t\t\t\t\\caption{Point configuration on $F_{d+1}$ and $\\widetilde{F}_{d+1}$}\r\n\t\t\t\t\t\\label{fig:point config}\r\n\t\t\t\t\\end{minipage}\r\n\t\t\t\t\\begin{minipage}[t]{.65\\linewidth}\t\t\r\n\t\t\t\t\t\\centering\t\t\\raisebox{1.7em}{\r\n\t\t\t\t\t\t\\begin{tikzpicture}[decoration={brace,amplitude=2mm},scale=0.8]\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\\filldraw[black] (0,0) circle (0.06);\r\n\t\t\t\t\t\t\t\\draw(0,-0.3) node{\\footnotesize$0$};\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\\filldraw[red](0,5) circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[red](0.1,5.3) node{\\footnotesize$z^{(d+1)}$};\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\\filldraw[red] (6,2) circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[red](6.4,2) node{\\footnotesize$T_i$};\r\n\t\t\t\t\t\t\t\\filldraw[red] (-4,3) circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[red](-4,2.6) node{\\footnotesize$z^{(i)}$};\r\n\t\t\t\t\t\t\t\\draw[red](3.9,3.5) node{\\footnotesize$F_i$};\r\n\t\t\t\t\t\t\t\\draw[red] (-4,3)--(6,2);\r\n\t\t\t\t\t\t\t\\draw[red] (-4,3)--(0,5);\r\n\t\t\t\t\t\t\t\\draw[red] (0,5)--(6,2);\r\n\t\t\t\t\t\t\t\\draw[dashed,blue] (0,5)--(-0.2376,2.624);\r\n\t\t\t\t\t\t\t\\filldraw[blue] (-0.2376,2.624)circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[blue](-0.2376,2.3) 
node{\\footnotesize$\\bar{z}_{d+1}$};\r\n\t\t\t\t\t\t\t\\draw[dashed,blue] (0,0)--(0.257,2.57);\r\n\t\t\t\t\t\t\t\\filldraw[blue] (0.257,2.57)circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[blue](-0.4,1) node{\\footnotesize$\\rho_{d+1}$};\r\n\t\t\t\t\t\t\t\\filldraw[blue] (2,4)circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[dashed,blue] (0,0)--(2,4);\r\n\t\t\t\t\t\t\t\\draw[blue](0.8,1) node{\\footnotesize$\\rho_{i}$};\r\n\t\t\t\t\t\t\t\\draw[->,thick, green] (1.841,2.4158)--(2,4);\r\n\t\t\t\t\t\t\t\\filldraw[green] (0.4160, 4.1542)circle (0.06);\r\n\t\t\t\t\t\t\t\\draw[->,thick,green] (0.257,2.57)--(0.4160, 4.1542);\r\n\t\t\t\t\t\t\t\\draw[green](2.8,3) node{\\footnotesize $\\beta_ihu_{d+1}$};\r\n\t\t\t\t\t\t\t\\draw[->,thick,cyan] (0.4160, 4.1542)--(2,4);\r\n\t\t\t\t\t\t\t\\draw[cyan](1.2,3.8) node{\\footnotesize $v_{i}$};\r\n\t\t\t\t\t\t\t\\draw[blue](2.3,4.1) node{\\footnotesize $u_{i}$};\r\n\t\t\t\t\t\t\t\\draw[blue](0.7,2.25) node{\\footnotesize $u_{d+1}$};\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\\end{tikzpicture}}\r\n\t\t\t\t\t\\caption{Decomposition of the projection of $0$ to $F_i$}\r\n\t\t\t\t\t\\label{fig:projection}\r\n\t\t\t\t\\end{minipage}\r\n\t\t\t\\end{figure}\r\n\t\t\r\n\t\t\tFor $\\alpha_1=-\\alpha_2$ we fix $a\\in(0,1)$. To use the second part of Lemma \\ref{lemma_geometry} we need an estimate for\r\n\t\t\t$\\rho_i-\\rho_{d+1}$ for $i\\in\\{1,\\dots,d\\}$. \r\n\t\t\tLet $u_{i}$ be the projection of $0$ to $F_i$ for $i\\in\\{1,\\dots, d+1\\}$ and note that $\\bar{x}_{d+1}$, which we introduced as the projection of $x^{(d+1)}$ to $\\widetilde{F}_{d+1}$, is also the projection of $0$ on $\\widetilde{F}_{d+1}$. Then, for every $i\\in\\{1,\\dots, d\\}$, there exist a constant $\\beta_i\\geq 0$ and a vector $v_i$ orthogonal to $u_{d+1}$ such that\r\n\t\t\t$$\r\n\t\t\tu_i = (1+\\beta_ih) u_{d+1}+v_i\r\n\t\t\t$$\r\n\t\t\tand, thus,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\rho_i^2=\\lVert u_i\\rVert^2=(1+\\beta_ih)^2\\lVert u_{d+1}\\rVert^2+\\lVert v_i\\rVert^2=(1+\\beta_ih)^2\\rho_{d+1}^2+\\lVert v_i\\rVert^2.\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $\\bar{u}$ be the projection of $u_{d+1}$ to $\\widetilde{F}_{d+1}$, while $\\bar{z}_0$ is the intersection point of $F_{d+1}$ with the line through $0$ and $z^{(d+1)}$ (see Figure \\ref{fig:point config}). We show that we can choose $\\varepsilon_h>0$ small enough such that $u_{d+1}$ is very close to $\\bar{z}_0$ to ensure a minimum distance from $u_{d+1}$ to $T_i$. 
It holds \r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lVert \\bar{x}_{d+1}\\rVert^2+\\lVert \\bar{x}_{d+1}-\\bar{u}\\rVert^2=\\lVert \\bar{u}\\rVert^2\\leq\\lVert u_{d+1}\\rVert^2\\leq\\lVert \\bar{z}_{0}\\rVert^2\\leq (\\lVert \\bar{x}_{d+1}\\rVert+\\varepsilon_ha^2s^{-2/(d+1)})^2,\r\n\t\t\t\\end{align*}\r\n\t\t\twhich implies\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lVert \\bar{x}_{d+1}-\\bar{u}\\rVert^2\\leq 2\\lVert \\bar{x}_{d+1}\\rVert\\varepsilon_ha^2s^{-2/(d+1)}+\\varepsilon_h^2 a^4s^{-4/(d+1)}.\r\n\t\t\t\\end{align*}\r\n\t\t\tThis provides\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\lVert \\bar{z}_0-u_{d+1}\\rVert^2&\\leq \t\\lVert \\bar{x}_{d+1}-\\bar{u}\\rVert^2+ \\varepsilon_h^2a^4s^{-4/(d+1)}\r\n\t\t\t\t\\leq 2\\varepsilon_h a^2s^{-2/(d+1)}+ 2 \\varepsilon_h^2a^4s^{-4/(d+1)}.\r\n\t\t\t\\end{align*}\r\n\t\t\tHence, we can choose $\\varepsilon_h\\in(0,1/4)$ small enough such that \r\n\t\t\t\\begin{align}\\label{z0-u_d+1}\r\n\t\t\t\t\\lVert \\bar{z}_0-u_{d+1}\\rVert\\leq \\frac{1}{4}\\sqrt{\\frac{2}{d(d-1)}}as^{-1/(d+1)}=\\frac{\\sqrt{a}}{4}\\sqrt{\\frac{2}{d(d-1)}}\\ell\\leq \\frac{1}{4}\\sqrt{\\frac{2}{d(d-1)}}\\ell\r\n\t\t\t\\end{align}\r\n\t\t\tsince $a\\in(0,1)$. For $\\varepsilon_\\ell>0$ small enough such that for $s$ sufficiently large,\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\mathrm{dist} (\\bar{z}_0,T_i)\\geq \\mathrm{dist}(\\bar{x}_{d+1},\\widetilde{T}_i)-2\\varepsilon_ha^2s^{-2/(d+1)}-(d-1)\\varepsilon_\\ell\\sqrt{a}s^{-1/(d+1)}\\geq\\frac{1}{2} \\sqrt{\\frac{2}{d(d-1)}}\\ell,\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\\eqref{z0-u_d+1} implies that $\\mathrm{dist}(u_{d+1}, T_i)\\geq \\frac{1}{4}\\sqrt{\\frac{2}{d(d-1)}}\\ell$ for $i\\in\\{1,\\dots,d\\}$ and $s$ sufficiently large. Then, for $\\lVert v_i\\rVert\\leq \\frac{1}{8} \\sqrt{\\frac{2}{d(d-1)}}\\ell $, $\\mathrm{dist}(u_i,T_i)$ is at least $\\frac{1}{8}\\sqrt{\\frac{2}{d(d-1)}}\\ell$ since $\\mathrm{dist} (u_{d+1},T_i)\\leq \\|v_i\\| + \\mathrm{dist}(u_i,T_i)$ (see Figure \\ref{fig:projection}). Hence, with the intercept theorem we have together with \\eqref{h_quer_l_und_u} and \\eqref{h_i_u},\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\rho_i-\\rho_{d+1}&\\geq\\beta_ih\\lVert u_{d+1}\\rVert = \\bar{h} \\frac{\\mathrm{dist}(u_i,T_i)}{\\mathrm{dist}(z^{(d+1)},T_i)} \\\\\r\n\t\t\t\t&\\geq \\frac{1}{8}\\sqrt{\\frac{2}{d(d-1)}}\\ell\\cdot \\frac{\\bar{h}}{\\sqrt{\\bar{h}^2+h_i^2}}\\\\\r\n\t\t\t\t&\\geq \\frac{1}{8}\\sqrt{\\frac{2a}{d(d-1)}}s^{-1/(d+1)}\\cdot \\frac{\\frac{1}{4}a^2s^{-2/(d+1)}}{\\sqrt{a^4s^{-4/(d+1)}+c_{h,u}^2a s^{-2/(d+1)}}}\\\\\r\n\t\t\t\t&\\geq c_{\\rho,l} a^2s^{-2/(d+1)}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor a suitable constant $c_{\\rho,l}>0$.\r\n\t\t\tIf $\\lVert v_i\\rVert>\\frac{1}{8}\\sqrt{\\frac{2}{d(d-1)}}\\ell$, we have $$\\rho_i^2-\\rho_{d+1}^2\\geq\\rho_i^2-(1+\\beta_ih)^2\\rho_{d+1}^2 =\\|v_i\\|^2 >\\frac{1}{64}\\frac{2}{d(d-1)}\\ell^2.$$ Hence,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\rho_i-\\rho_{d+1}\\geq \\frac{2}{64(\\rho_i+\\rho_{d+1})d(d-1)}\\ell^2\\geq \\frac{1}{64d(d-1)}\\ell^2=\\frac{a}{64d(d-1)}s^{-2/(d+1)},\r\n\t\t\t\\end{align*}\r\n\t\t\ti.e. 
altogether we have\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\rho_i-\\rho_{d+1}\\geq c_{\\rho,l,a}s^{-2/(d+1)}\\label{rho_i-rho_d+1}\r\n\t\t\t\\end{align}\r\n\t\t\tfor $s$ sufficiently large with a suitable constant $c_{\\rho,l,a}>0$ that depends on $a$.\r\n\t\t\t\r\n\t\t\tTogether with Lemma \\ref{lemma_geometry} and the inequalities \\eqref{rho_d+1_l}, \\eqref{h_quer_l_und_u}, \\eqref{h_i_l}, \\eqref{T_i_u}, \\eqref{F_i_u}, \\eqref{F_i_l}, \\eqref{FTC_u}, \\eqref{rho_i-rho_d+1} this provides for a fixed $a\\in(0,1)$, $t\\in[0,1/2]$ and $s$ sufficiently large,\r\n\t\t\t\\begin{align}\r\n\t\t\t\t&\\Delta_{p_1}-\\Delta_{p_2}\\geq \\sum_{i=1}^d(p_2-p_1) c_{\\rho,l,a}s^{-2/(d+1)}c_{F,l}a^{(d-1)/2}s^{-(d-1)/(d+1)}\\nonumber\\\\\r\n\t\t\t\t&-2^{p_2+2}(a+a^2)^2s^{-4/(d+1)}\\sum_{i=1}^dc_{F,u}a^{(d-1)/2}s^{-(d-1)/(d+1)}\\nonumber\\\\\r\n\t\t\t\t&-2^{p_2}(a+a^2)s^{-2/(d+1)}\\sum_{i=1}^d c_{T,u}a^{(d-2)/2}s^{-(d-2)/(d+1)}\\frac{a^4s^{-4/(d+1)}}{2c_{h,l}a^{1/2}s^{-1/(d+1)}}\\nonumber\\\\\r\n\t\t\t\t&=:C_{a,1}s^{-1}-C_{a,2}s^{-(d+3)/(d+1)}-C_{a,3}s^{-(d+3)/(d+1)},\\label{alpha1=-alpha2}\r\n\t\t\t\\end{align}\r\n\t\t\twhich can be bounded from below by $\\frac{1}{2} C_{a,1}s^{-1}$ for $s$ sufficiently large.\r\n\t\t\tAltogether, for $\\alpha_1\\neq -\\alpha_2$ we fix $a>0$ sufficiently large such that \\eqref{alpha1+alpha2neq0} holds and for $\\alpha_1=-\\alpha_2$ we fix $a\\in(0,1)$ such that \\eqref{alpha1=-alpha2} holds. Then, for $$C_\\alpha=\\begin{cases}\r\n\t\t\t\t\\frac{1}{2}C_{a,1}, &\\text{ for }\\alpha_1=-\\alpha_2,\\\\\r\n\t\t\t\t\\tilde{c}_1a^{(d+5)/2}, &\\text{ else},\r\n\t\t\t\\end{cases}$$ it holds that\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\lvert\\alpha_1\\Delta_{p_1}+\\alpha_2\\Delta_{p_2}\\rvert\\geq C_\\alpha s^{-1}\\label{alphadelta1+alphadelta2}\r\n\t\t\t\\end{align}\r\n\t\t\tfor all $t\\in[0,1/2]$ and $s$ sufficiently large.\r\n\t\t\t\r\n\t\t\tFor the application of Theorem \\ref{thm:varbound} we consider the situation that $z^{(1)},\\dots,z^{(d)}$ are points of the Poisson process and the point $z^{(d+1)}$ is added. To ensure that the change of $\\alpha_1\\widetilde{A}_{p_1}+\\alpha_2\\widetilde{A}_{p_2}$ is given by $s(\\alpha_1\\Delta_{p_1}+\\alpha_2\\Delta_{p_2})$ we require that no further points of $\\eta_s$ are present which prevent that $z^{(1)},\\dots, z^{(d)}$ is a facet of the random polytope or which could be connected to $z^{(d+1)}$ by edges.\r\n\t\t\tTherefore, we consider the set\r\n\t\t\t\\begin{align*}\r\n\t\t\tM_s^x=\\{y=(y_1,\\dots,y_d)\\in B^d(0,1):y_1\\geq 1-c_as^{-2/(d+1)}\\}.\r\n\t\t\t\\end{align*}\r\n\t\t\tfor some constant $c_a>0$, which might depend on $a$. \r\n\t\t\t\r\n\r\n\t\t\tFirst, we show that the constant $c_a>0$ can be chosen independently from $s$ such that $(B^d(0,1)\\backslash \tM_s^x)\\cap \\mathrm{Vis}(z^{(d+1)},\\mathrm{Conv}(z^{(1)},\\dots,z^{(d)},x^{(d+1)}))=\\emptyset$ for all $z^{(1)}\\in C_1^x,\\hdots,z^{(d)}\\in C_d^x$, i.e.\\ that $c_a>0$ can be chosen in such a way that any line on the boundary of $\\mathrm{Conv}(z^{(1)},\\dots,z^{(d)},x^{(d+1)})$ through $x^{(d+1)}$ meets the hyperplane $\\{y=(y_1,\\dots,y_d)\\in \\mathbb{R}^d:y_1=1-c_as^{-2/(d+1)}\\}$ outside the ball $B^d(0,1)$. Note that this implies that $(B^d(0,1)\\backslash \tM_s^x)\\cap\\mathrm{Vis}(z^{(d+1)},\\mathrm{Conv}(z^{(1)},\\dots,z^{(d+1)}))=\\emptyset$ for all $t\\in[0,1/2]$. 
With \\eqref{dist_xz} and \\eqref{h_i_l} it holds that \r\n\t\t\t\t\t\\begin{align}\r\n\t\t\t\t\t\t\\mathrm{dist}(\\bar{x}_{d+1},T_i)&\\geq h_i-\\lVert \\bar{x}_{d+1}-\\bar{z}_{d+1}\\rVert\\geq c_{h,l}\\sqrt{a}s^{-1/(d+1)}-2a^2s^{-2/(d+1)}\\nonumber\\\\&\\geq \\tilde{c}_{h,l}\\sqrt{a}s^{-1/(d+1)}\\label{eq:dist(x_d+1,T_i)}\r\n\t\t\t\t\t\\end{align}\r\n\t\t\t\t\tfor $\\tilde{c}_{h,l}=\\frac{c_{h,l}}{2}$, $i\\in\\{1,\\dots,d\\}$ and $s$ sufficiently large.\r\n\t\t\t\t\t\r\n\r\n\t\t\t\t\tLet $B_C^{d-1}=B^{d}(\\bar{x}_{d+1},\\tilde{c}_{h,l}\\sqrt{a}s^{-1/(d+1)})\\cap H$. Then, because of \\eqref{eq:dist(x_d+1,T_i)}, $$\\mathrm{Vis}(x^{(d+1)},\\mathrm{Conv}(z^{(1)},\\dots,z^{(d)},x^{(d+1)}))$$ is a subset of the visibility region at $x^{(d+1)}$ of the smallest cone $K$ with apex $x^{(d+1)}$ that contains $B_C^{d-1}$. Hence, if we choose $c_a>0$ such that $(B^d(0,1)\\backslash \tM_s^x)\\cap\\mathrm{Vis}(x^{(d+1)},K)=\\emptyset$, then also $(B^d(0,1)\\backslash \tM_s^x)\\cap \\mathrm{Vis}(x^{(d+1)},\\mathrm{Conv}(z^{(1)},\\dots,z^{(d)},x^{(d+1)}))=\\emptyset$.\r\n\t\t\t\tBecause of symmetry it suffices to ensure that the line through $x^{(d+1)}$ and\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\hat{y}&=(1-(a+a^2)s^{-2/{(d+1)}})e_1+\\tilde{c}_{h,l}\\sqrt{a}s^{-1/(d+1)}e_2\r\n\t\t\t\t\\end{align*} meets $H$ outside of $B^d(0,1)$. A point $\\hat{x}_\\gamma$ on the line through $x^{(d+1)}$ and $\\hat{y}$ can be described by\r\n\t\t\t\t\\begin{align}\\label{x_gamma2}\r\n\t\t\t\t\t\\hat{x}_\\gamma=(1-as^{-2/(d+1)})e_1+\\gamma(-a^2s^{-2/(d+1)}e_1+\\tilde{c}_{h,l}\\sqrt{a}s^{-1/(d+1)}e_2)\r\n\t\t\t\t\\end{align}\r\n\t\t\t\tfor $\\gamma\\in\\mathbb{R}$. To determine a possible constant $c_a>0$ we need a $\\gamma>1$ such that the point $x_\\gamma=(x_{\\gamma,1},\\dots,x_{\\gamma,d})$ fulfils $\\lVert x_\\gamma\\rVert>1$. If $x_{\\gamma,1}>1-\\frac{1}{2}\\sum_{i=2}^dx_{\\gamma,i}^2\\geq \\sqrt{1-\\sum_{i=2}^dx_{\\gamma,i}^2}$, it holds that $x_\\gamma\\notin B^d(0,1)$, i.e.\\ $\\hat{x}_{\\gamma}\\notin B^d(0,1)$ if\r\n\t\t\t\t\\begin{align}\r\n\t\t\t\t\t1-(a+\\gamma a^2)s^{-2/(d+1)}>1-\\frac{\\gamma^2}{2}\\tilde{c}_{h,l}^2a s^{-2/(d+1)} \r\n\t\t\t\t\t\\quad\\quad\\Longleftrightarrow\\quad\\quad \\frac{\\gamma^2}{2}\\tilde{c}_{h,l}^2-\\gamma a-1>0.\\label{eq:inequality_gamma2}\r\n\t\t\t\t\\end{align}\r\n\t\t\t\tThis inequality is fulfilled for $\\gamma>1$ large enough independently of $s$. \r\n\t\t\t\tHence, inserting a possible $\\hat{\\gamma}>1$, which fulfils \\eqref{eq:inequality_gamma2}, in \\eqref{x_gamma2} provides that $c_a>0$ can be chosen independently from $s$ as $c_a=a+\\hat{\\gamma}a^2$. From now on let $s$ be sufficiently large such that $1-c_as^{-2/(d+1)}\\geq \\rho_0$.\r\n\t\t\t\r\n\t\t\tDue to translation invariance, the same configuration of sets can be constructed for any $x\\in B^d(0,1)$ with $\\lVert x\\rVert=1-(a+ta^2)s^{-2/(d+1)}$ for $t\\in[0,1/2]$ by defining $M_s^x,C_1^x,\\dots,C_{d}^x$ for each $x$ as the suitable rotated regions. 
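For concreteness (only the existence of some $\hat{\gamma}>1$ matters for the argument), the quadratic inequality \eqref{eq:inequality_gamma2} can be solved explicitly: it is satisfied for every
\begin{align*}
\gamma>\frac{a+\sqrt{a^2+2\tilde{c}_{h,l}^2}}{\tilde{c}_{h,l}^2},
\end{align*}
so that, for instance, $\hat{\gamma}=\max\Big\{2,\frac{2\big(a+\sqrt{a^2+2\tilde{c}_{h,l}^2}\big)}{\tilde{c}_{h,l}^2}\Big\}$ is an admissible choice and leads to $c_a=a+\hat{\gamma}a^2$.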
\r\n\t\t\tDefine\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tA=\\{x\\in B^d(0,1):\\lVert x\\rVert= 1-(a+ta^2)s^{-2/(d+1)}\\text{ and }t\\in [0,1/2]\\}.\r\n\t\t\t\\end{align*}\r\n\t\t\tCombining our previous considerations leads to\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\alpha_1D_x\\widetilde{A}_{p_1}+\\alpha_2D_x\\widetilde{A}_{p_2}=s(\\alpha_1\\Delta_{p_1}+\\alpha_2\\Delta_{p_2})\r\n\t\t\t\\end{align*}\r\n\t\t\tif\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\eta_s(C_i^x)=1 \\quad\\text{ for }\\quad i\\in\\{1,\\dots,d\\} \\quad\\text{ and }\\quad\\eta_s\\Big(M_s^x\\backslash\\bigcup\\limits_{i=1}^{d}C_i^x\\Big)=0.\r\n\t\t\t\\end{align*}\r\n\t\t\tfor $s$ sufficiently large.\r\n\t\t\t Together with \\eqref{alphadelta1+alphadelta2} we obtain for $s$ sufficiently large\r\n\t\t\t\\allowdisplaybreaks\r\n\t\t\t\\begin{align}\r\n\t\t\t\t&\\E\\left[\\int\\lvert \\alpha_1D_x\\widetilde{A}_{p_1}+\\alpha_2D_x\\widetilde{A}_{p_2}\\rvert ^2\\;\\mathrm{d}\\lambda(x)\\right]\\geq \\E\\left[\\int_{A}\t\\lvert \\alpha_1D_x\\widetilde{A}_{p_1}+\\alpha_2D_x\\widetilde{A}_{p_2}\\rvert ^2\\;\\mathrm{d}\\lambda(x)\\right]\\nonumber \\\\&\\geq s\\int_{A} \\mathbb{P}(\\lvert \\alpha_1D_x\\widetilde{A}_{p_1}+\\alpha_2D_x\\widetilde{A}_{p_2}\\rvert\\geq C_\\alpha ) C_\\alpha^2\\;\\mathrm{d}x\\nonumber\\\\\r\n\t\t\t\t&\\geq C_\\alpha^2 s\\int_{A}\\mathbb{P}\\Big(\\eta_s\\Big(M_s^x\\backslash\\bigcup\\limits_{i=1}^{d}C_i^x\\Big)=0,\\eta_s(C_1^x)=1,\\dots,\\eta_s(C_{d}^x)=1\\Big) \\;\\mathrm{d}x\\nonumber\\\\\r\n\t\t\t\t&= C_\\alpha^2 s\\int_{A}\\mathbb{P}\\Big(\\eta_s\\Big(M_s^x\\backslash\\bigcup\\limits_{i=1}^{d}C_i^x\\Big)=0\\Big)\\prod_{i=1}^{d}\\mathbb{P}(\\eta_s(C_i^x)=1) \\;\\mathrm{d}x.\\label{eq:prob polytopes}\r\n\t\t\t\\end{align}\r\n\t\tDue to the definition of $C_i^x$ we know that $\\lambda_d(C_i^x)=\\varepsilon_ha^2(\\varepsilon_\\ell\\sqrt{a})^{d-1} s^{-1}$ for $i\\in\\{1,\\dots, d\\}$, i.e. the volume of the sets $C_i^x$ is of order $s^{-1}$. \r\n\t\t\r\n\t\tFor $\\lambda_d(M_s^x)$ we consider at first the radius $r$ of the $(d-1)$-dimensional ball $B_C=\\{y=(y_1,\\dots,y_d)\\in B^d(0,1):y_1= 1-c_as^{-2/(d+1)}\\}$. This radius fulfils $r^2+(1-c_as^{-2/(d+1)})^2=1$. Hence,\r\n\t\t\\begin{align*}\r\n\t\t\tr^2=2c_as^{-2/(d+1)}-c_a^2s^{-4/(d+1)}\\leq 2c_as^{-2/(d+1)}\r\n\t\t\\end{align*}\r\n\t\tand therefore\r\n\t\t$$\r\n\t\t\t\\lambda_d(M_s^x)\\leq \\kappa_{d-1}r^{d-1}c_as^{-2/(d+1)}\\leq\\tilde{c}_as^{-1}\r\n\t\t$$\r\n\t\tfor $\\tilde{c}_a=\\kappa_{d-1}c_a(\\sqrt{2c_a})^{d-1}$. Thus,\r\n\t\t$\\lambda_d(M_s^x\\backslash\\bigcup\\limits_{i=1}^{d}C_i^x)$ is at most of order $s^{-1}$. Therefore, since the Poisson process has intensity $s$, the order of the whole term in \\eqref{eq:prob polytopes} can be bounded from below by a multiple of $s^{-1}\\lambda_d(A)$, where \r\n\t\t\\begin{align*}\r\n\t\t\t\\lambda_d(A)= \\kappa_d \\bigg((1-as^{-2/(d+1)})^d-\\Big(1-\\Big(a+\\frac{a^2}{2}\\Big)s^{-2/(d+1)}\\Big)^d \\bigg) \\geq \\tilde{c} s^{-2/(d+1)}\r\n\t\t\\end{align*}\r\n\tfor a suitable constant $\\tilde{c}>0$ and $s$ sufficiently large. Altogether we have\r\n\t\\begin{align*}\r\n\t\t\\E\\left[\\int\\lvert \\alpha_1D_x\\widetilde{A}_{p_1}+\\alpha_2D_x\\widetilde{A}_{p_2}\\rvert ^2\\;\\mathrm{d}\\lambda(x)\\right]\\geq \\tilde{C} s^{(d-1)/(d+1)}\r\n\t\\end{align*}\r\nfor some constant $\\tilde{C}>0$ and $s$ sufficiently large.\r\n\r\n\t\tNext, we check condition \\eqref{condition}. 
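Before doing so, let us record for completeness a short argument for the lower bound on $\lambda_d(A)$ used above (a supplementary computation). By the mean value theorem applied to $t\mapsto t^d$ there exists a $\xi\in\big[1-\big(a+\frac{a^2}{2}\big)s^{-2/(d+1)},\,1-as^{-2/(d+1)}\big]$ with
\begin{align*}
\lambda_d(A)=\kappa_d\, d\,\xi^{d-1}\,\frac{a^2}{2}s^{-2/(d+1)}\geq \kappa_d\, d\,2^{-(d-1)}\,\frac{a^2}{2}s^{-2/(d+1)}
\end{align*}
for $s$ sufficiently large, since $\xi\geq 1-\big(a+\frac{a^2}{2}\big)s^{-2/(d+1)}\geq\frac{1}{2}$ for $s$ sufficiently large; this yields the stated bound with $\tilde{c}=\kappa_d d a^2 2^{-d}$.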
Due to Lemma \\ref{lemma_scores} we can apply the results in \\cite[Lemma 5.5 and Lemma 5.9]{LSY19}, i.e.\\ there exists a constant $C>0$ satisfying\r\n\t\t\t\\begin{align}\r\n\t\t\t\t\\mathbb{E}\\lvert D_{x}\\widetilde{A}_{p_i}(\\eta_s\\cup U)\\rvert^{5} \\leq C\r\n\t\t\t\t\\label{lemma5.52}\r\n\t\t\t\\end{align}\r\n\t\t\tfor $U\\subset B^d(0,1)$ with $\\lvert U\\rvert\\leq 1$ and for any $\\beta>0$,\r\n\t\t\t\\begin{align}\r\n\t\t\t\ts\\int\\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^\\beta\\;\\mathrm{d}y\\leq C_\\beta \\exp[-c_\\beta sd_{\\max}(x,\\partial B^d(0,1))^{(d+1)}]\r\n\t\t\t\t\\label{lemma5.92}\r\n\t\t\t\\end{align}\r\n\t\t\tfor some constants $C_\\beta, c_\\beta>0$ and $x\\in B^d(0,1)$. Note that the statements of \\cite[Lemma 5.9]{LSY19} contain typos since the exponent $\\alpha$ of ${\\rm d}_s(x_1,K)$ is missing in the upper bounds.\r\n\t\t\tUsing \\eqref{lemma5.52}, the Hölder inequality and Jensen's inequality provides\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\lvert D_{x,y}^2\\widetilde{A}_{p_i}\\rvert^2= \\mathbb{E}\\left[\\lvert D_{x,y}^2\\widetilde{A}_{p_i}\\rvert^2\\mathbbm{1}\\{D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0\\}\\right]\\\\\r\n\t\t\t\t&\\leq (\\mathbb{E}\\lvert D_{x,y}^2\\widetilde{A}_{p_i}\\rvert^{5})^{2/5}\\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^{3/5}\\\\\r\n\t\t\t\t&=\t\\mathbb{E}\\lvert D_{x}\\widetilde{A}_{p_i}(\\eta_s\\cup\\{y\\})-D_{x}\\widetilde{A}_{p_i}(\\eta_s)\\rvert^{5})^{2/5} \\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^{3/5}\\\\\r\n\t\t\t\t&\\leq \\left(2^{4}\\left(\\mathbb{E}\\lvert D_{x}\\widetilde{A}_{p_i}(\\eta_s\\cup\\{y\\})\\rvert^{5}+\\mathbb{E}\\lvert D_{x}\\widetilde{A}_{p_i}(\\eta_s)\\rvert^{5}\\right)\\right)^{2/5}\\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^{3/5}\\\\\r\n\t\t\t\t&\\leq 4C^{2/5}\\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^{3/5}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor $i\\in\\{1,2\\}$. Therefore, using Jensen's inequality and \\eqref{lemma5.92}, it follows\r\n\t\t\t\\allowdisplaybreaks\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\mathbb{E}\\left[\\int_{B^d(0,1)}\\int_{B^d(0,1)} \\left(D_{x,y}^2\\sum_{i=1}^{2}\\alpha_i\\widetilde{A}_{p_i}\\right)^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\right]\\\\&\r\n\t\t\t\t\\leq 2\\sum_{i=1}^{2}\\alpha_i^2\\int_{B^d(0,1)}\\int_{B^d(0,1)} \t\\mathbb{E}\\lvert D_{x,y}^2\\widetilde{A}_{p_i}\\rvert^2\\;\\mathrm{d}\\lambda(x)\\;\\mathrm{d}\\lambda(y)\\\\&\r\n\t\t\t\t\\leq 2\\sum_{i=1}^{2}\\alpha_i^2 s\\int_{B^d(0,1)} s\\int_{B^d(0,1)} 4C^{2/5}\\mathbb{P}(D_{x,y}^2\\widetilde{A}_{p_i}\\neq 0)^{3/5}\\;\\mathrm{d}x\\;\\mathrm{d}y\\\\\r\n\t\t\t\t&\\leq 8 \\sum_{i=1}^{2}\\alpha_i^2 C^{2/5} s\\int_{B^d(0,1)} C_{3/5} \\exp[-c_{3/5} s d_{\\max}(x,\\partial B^d(0,1))^{(d+1)}]\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\leq c_{\\alpha}^{(1)} s\\int_{B^d(0,1)} \\exp[-c_{3/5} s (1-\\lVert x\\rVert)^{(d+1)/2}]\\;\\mathrm{d}x\\\\\r\n\t\t\t\t&\\leq c_{\\alpha}^{(2)} s\\int_{0}^1 \\exp[-c_{3/5} s (1-r)^{(d+1)/2}]\\;\\mathrm{d}r = c_{\\alpha}^{(2)} s\\int_{0}^1 \\exp[-c_{3/5} s u^{(d+1)/2}]\\;\\mathrm{d}u \\\\\r\n\t\t\t\t&\\leq c_{\\alpha}^{(3)} s \\int_{0}^{(c_{3/5}s)^{2/(d+1)}} e^{-t^{(d+1)/2}} s^{-2/(d+1)}\\mathrm{dt}\r\n\t\t\t\t\\leq c_{\\alpha}^{(4)} ss^{-2/(d+1)} = c_{\\alpha}^{(4)} s^{(d-1)/(d+1)}\r\n\t\t\t\\end{align*}\r\n\t\t\tfor suitable constants $c_{\\alpha}^{(i)}>0$ for $i\\in\\{1,2,3,4\\}$ and $s$ sufficiently large. 
This shows together with Theorem \\ref{thm:varbound} that $\\Var[\\alpha_1\\widetilde{A}_{p_1}+\\alpha_2\\widetilde{A}_{p_2}]\\geq cs^{(d-1)/(d+1)}$ for a suitable constant $c>0$. Now \\eqref{eqn:approximation_A_p} yields a lower bound of the same order for $\\alpha_1sA_{p_1}+\\alpha_2sA_{p_2}$, which completes the proof.\r\n\t\t\\end{proof}\r\n\t\tAs a consequence of the lower variance bound in Theorem \\ref{theorem:randompoly_var}, one can derive bounds for the multivariate normal approximation of two $L^p$ surface areas. Therefore, we define the $d_{convex}$-distance. Let $\\mathcal{I}$ be the set of indicators of measurable convex sets in $\\mathbb{R}^2$. Then, for the two-dimensional random vectors $Y$ and $Z$ the $d_{convex}$-distance is defined as\r\n\t\t\\begin{align*}\r\n\t\t\td_{convex}(Y,Z)=\\sup_{h\\in \\mathcal{I}} \\lvert\\E[h(Y)]-\\E[h(Z)]\\rvert.\r\n\t\t\\end{align*}\r\n\t\t\r\n\t\t\\begin{theorem}\r\n\t\t\t\\label{theorem:randompoly_limit}\r\n\t\t\tLet $(A_{p_1},A_{p_2})$ be the vector of $L^p$ surface areas for $p_1,p_2\\in[0,1]$ with $p_1\\neq p_2$. Denote by $\\Sigma(s)$ the covariance matrix of $s^{(d+3)/(2(d+1))}(A_{p_1},A_{p_2})$. Let $N_{\\Sigma(s)}$ be a centred Gaussian random vector with covariance matrix $\\Sigma(s)$. Then there exists a constant $c>0$ such that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\td_{convex}(s^{(d+3)/(2(d+1))}(A_{p_1}-\\E[A_{p_1}],A_{p_2}-\\E[A_{p_2}]), N_{\\Sigma(s)})\\leq c s^{-(d-1)/(2(d+1))}\r\n\t\t\t\t\\end{align*}\r\n\t\t\tfor $s\\geq 1$.\r\n\t\t\\end{theorem}\r\n\t\t\r\n\t\t\\begin{proof}\r\n\t\tFor $s\\geq 1$ we define $\\widetilde{Z}_s = s^{-(d-1)/(2(d+1))}(\\widetilde{A}_{p_1},\\widetilde{A}_{p_2})$. From \\cite[Theorem 4.1 c)]{SY21} with $\\tau=(d-1)/(2(d+1))$, whose assumptions are satisfied by Lemma \\ref{lemma_scores}, it follows that\r\n\t\t\\begin{equation}\\label{eqn:approximation_d_convex}\r\n\t\td_{convex}(\\widetilde{Z}_s-\\mathbb{E}[\\widetilde{Z}_s],N_{\\Sigma(s)}) \\leq \\tilde{c} s^{-(d-1)/(2(d+1))}\r\n\t\t\\end{equation}\r\n\t\tfor $s\\geq 1$ with a constant $\\tilde{c}>0$ if we can check that\r\n\\begin{itemize}\r\n\\item [(i)] for any constant $c_I>0$ there exists a constant $\\tilde{c}_I>0$ such that\r\n\\begin{align*}\r\ns\\int_{B^d(0,1)}\\exp[-c_Isd_{\\max}(x,\\partial B^d(0,1))^{(d+1)}]\\;\\mathrm{d}x\\leq \\tilde{c}_I s^{(d-1)/(d+1)}\r\n\\end{align*}\r\nfor $s\\geq 1$,\r\n\\item [(ii)] $|(\\Sigma(s))_{u,v} - \\Cov(\\widetilde{Z}_s^{(u)},\\widetilde{Z}_s^{(v)})|$ is at most of order $s^{-(d-1)/(2(d+1))}$ for all $u,v\\in\\{1,2\\}$,\r\n\\item [(iii)] $\\|\\Sigma(s)^{-1}\\|_{op}$ is uniformly bounded for $s$ sufficiently large, where $\\lVert\\cdot\\rVert_{\\text{op}}$ denotes the operator norm.\r\n\\end{itemize}\r\nAnalogously to the calculation at the end of the proof of Theorem \\ref{theorem:randompoly_var} one can show (i), while (ii) follows from \\eqref{eqn:approximation_A_p}.\r\n\r\nIn order to establish (iii), we assume that there is a subsequence $(s_n)_{n\\in\\mathbb{N}}$ such that $\\lVert\\Sigma(s_n)^{-1}\\rVert_{\\text{op}}\\to\\infty$ and $s_n\\to\\infty$ as $n\\to\\infty$. From the Poincar\\'e inequality (see \\eqref{eqn:Poincare}), \\eqref{lemma5.52}, \\cite[(5.8) in Lemma 5.10]{LSY19} and (i), one deduces that all variances and, thus, all covariances of the components of $\\widetilde{Z}_s$ are uniformly bounded for $s\\geq1$. By (ii) the same holds for the entries of $\\Sigma(s)$. 
Thus, there exists a subsequence $(s_{n_k})_{k\\in\\mathbb{N}}$ and a matrix $\\Sigma\\in\\mathbb{R}^{2\\times 2}$ such that $\\Sigma(s_{n_k})\\to\\Sigma$ as $k\\to\\infty$. From Theorem \\ref{theorem:randompoly_var} it follows that $\\Sigma$ is positive definite as $\\alpha^T\\Sigma\\alpha=\\lim\\limits_{k\\to\\infty}\\alpha^T\\Sigma(s_{n_k})\\alpha>0$ for any $\\alpha\\in\\mathbb{R}^2\\backslash\\{0\\}$. Thus, $\\lVert\\Sigma^{-1}\\rVert_{\\text{op}}$ is well-defined and $\\Vert\\Sigma(s_{n_k})^{-1}\\rVert_\\text{op}\\to\\lVert\\Sigma^{-1}\\rVert_{\\text{op}}$ as $k\\to\\infty$. Since this is a contradiction to the assumption, we have shown that $\\lVert\\Sigma(s)^{-1}\\rVert_{\\text{op}}$ is uniformly bounded for $s$ sufficiently large, which is (iii) and completes the proof of \\eqref{eqn:approximation_d_convex}.\r\n\r\nMoreover, let $Z_s = s^{(d+3)/(2(d+1))}(A_{p_1},A_{p_2})=s^{-(d-1)/(2(d+1))}(sA_{p_1},sA_{p_2})$. It follows from the triangle inequality that\r\n\\begin{align*}\r\n& d_{convex}(Z_s-\\mathbb{E}[Z_s],N_{\\Sigma(s)})\\\\ & \\leq d_{convex}(Z_s-\\mathbb{E}[Z_s],\\widetilde{Z}_s-\\mathbb{E}[Z_s]) + d_{convex}(\\widetilde{Z}_s-\\mathbb{E}[Z_s],N_{\\Sigma(s)}) \\\\\r\n& \\leq \\p(Z_s\\neq\\widetilde{Z}_s) + d_{convex}(\\widetilde{Z}_s-\\mathbb{E}[\\widetilde{Z}_s],N_{\\Sigma(s)}+\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s]) \\\\\r\n& \\leq \\p(Z_s\\neq\\widetilde{Z}_s) + d_{convex}(\\widetilde{Z}_s-\\mathbb{E}[\\widetilde{Z}_s],N_{\\Sigma(s)}) + d_{convex}(N_{\\Sigma(s)},N_{\\Sigma(s)}+\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s]).\r\n\\end{align*}\r\nSince the first term on the right-hand side vanishes exponentially fast by \\eqref{eqn:approximation_A_p} and the second one was treated in \\eqref{eqn:approximation_d_convex}, it remains to study the third term. We have that\r\n\\begin{align*}\r\n& d_{convex}(N_{\\Sigma(s)},N_{\\Sigma(s)}+\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s])\\\\ & = d_{convex}(N_{I},N_{I}+\\Sigma(s)^{-1/2}(\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s])) \\\\\r\n& \\leq \\sup_{K\\subseteq\\mathbb{R}^2\\text{ convex}} \\p({\\rm dist}(N_I,\\partial K)\\leq \\|\\Sigma(s)^{-1/2}(\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s])\\| ) \\\\\r\n& \\leq \\sup_{K\\subseteq\\mathbb{R}^2\\text{ convex}} \\p({\\rm dist}(N_I,\\partial K)\\leq \\|\\Sigma(s)^{-1}\\|^{1/2}_{op}\\|\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s]\\| ),\r\n\\end{align*}\r\nwhere $N_I$ is distributed according to a two-dimensional standard normal distribution. From \\cite[Corollary 3.2]{BR10} one obtains that the right-hand side is bounded by a constant times\r\n$$\r\n \\|\\Sigma(s)^{-1}\\|^{1/2}_{op}\\|\\mathbb{E}[Z_s]-\\mathbb{E}[\\widetilde{Z}_s]\\|.\r\n$$\r\nNow (iii) from above and \\eqref{eqn:approximation_A_p} imply that this expression vanishes exponentially fast for $s\\to\\infty$, which concludes the proof.\r\n\t\t\\end{proof}\r\n\t\t\r\n\t\tTheorem \\ref{theorem:randompoly_var} and Theorem \\ref{theorem:randompoly_limit} especially provide a lower variance bound and a result on the multivariate normal approximation for the vector of surface area and volume of a random polytope since $A_0=dV_d$ and $A_1=S_{d-1}$, where $V_d$ and $S_{d-1}$ denote the volume and surface area, respectively. \r\n\t\t\r\n\t\tLower and upper variance bounds of the same order as in Theorem \\ref{theorem:randompoly_var} were already derived for the volume in \\cite{R05}. For binomial input, analogous variance bounds for intrinsic volumes were shown in \\cite{BFV10}. 
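As a purely illustrative aside, and not as part of the mathematical argument, the order of the variance in Theorem \ref{theorem:randompoly_var} can be probed numerically in dimension $d=2$. The following Python sketch (relying on numpy and scipy; all function names, the choice $p=1/2$ and the intensities are ours and serve only the illustration) uses the facet representation $A_p=\sum_{F}\mathrm{dist}(0,F)^{1-p}\lambda_{d-1}(F)$ of the $L^p$ surface area, which is consistent with \eqref{def_Delta} and with $A_0=dV_d$, $A_1=S_{d-1}$. It estimates $\mathrm{Var}[A_p]$ for the convex hull of a Poisson process in the unit disk for a few intensities $s$ and rescales by $s^{(d+3)/(d+1)}=s^{5/3}$; in view of the lower bound above and the matching upper bounds from the literature, this rescaled quantity is expected to stay of constant order.
\begin{verbatim}
# Monte Carlo sketch (illustration only): variance of the L^p surface area of the
# convex hull of a Poisson process in the unit disk, d = 2.
import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)

def poisson_points_in_disk(s):
    """Poisson process with intensity s (w.r.t. Lebesgue measure) on B^2(0,1)."""
    n = rng.poisson(s * np.pi)
    r = np.sqrt(rng.random(n))            # radial density 2r on [0,1]
    phi = 2.0 * np.pi * rng.random(n)
    return np.column_stack((r * np.cos(phi), r * np.sin(phi)))

def lp_surface_area(points, p):
    """A_p = sum over hull edges F of dist(0, aff F)^(1-p) * length(F)."""
    hull = ConvexHull(points)
    total = 0.0
    for i, j in hull.simplices:           # in d = 2 each simplex is an edge
        a, b = points[i], points[j]
        length = np.linalg.norm(b - a)
        dist = abs(a[0] * b[1] - a[1] * b[0]) / length   # dist(0, line through a, b)
        total += dist ** (1.0 - p) * length
    return total

def estimated_variance(s, p, runs=500):
    values = []
    for _ in range(runs):
        pts = poisson_points_in_disk(s)
        if len(pts) >= 3:                 # ConvexHull needs at least 3 points
            values.append(lp_surface_area(pts, p))
    return np.var(values)

for s in (200.0, 800.0, 3200.0):
    v = estimated_variance(s, p=0.5)
    print("s = %6.0f   Var[A_p] = %.3e   Var[A_p] * s^(5/3) = %.3e"
          % (s, v, v * s ** (5.0 / 3.0)))
\end{verbatim}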
The case of an underlying Poisson process and, in particular, variance asymptotics for intrinsic volumes were discussed in \\cite{CSY13}. We expect that variance asymptotics for the $L^p$ surface area and especially the positivity of the asymptotic variance can be derived using the same method as in \\cite{CSY13}. However, the proof in \\cite{CSY13} cannot be directly transferred to the linear combination of two $L^p$ surface areas because for a linear combinations with scalars of different sign the monotonicity argument in \\cite[p. 100]{CSY13} does not work.\r\n\r\nIn \\cite{Grygierek21} the multivariate normal approximation of the vector of all intrinsic volumes and all numbers of lower-dimensional faces of the convex hull of Poisson points in a smooth convex body is considered. As in Theorem \\ref{theorem:randompoly_limit}, one compares with a multivariate normal distribution with the same covariance matrix, but as the so-called $d_3$-distance is studied no information about the regularity of the asymptotic covariance matrix is required. In the same work positive linear combinations of intrinsic volumes were considered since for coefficients with different signs it could not be ensured that the corresponding asymptotic variance is positive. For the special case of volume and surface area and an underlying ball, this problem is resolved by Theorem \\ref{theorem:randompoly_var}. In contrast to the findings in \\cite{Grygierek21}, Theorem \\ref{theorem:randompoly_limit} deals with non-smooth test functions and the obtained bounds are of a better order since a logarithmic factor could be removed. The rates of convergence derived in \\cite[Section 3]{LSY19} for the univariate normal approximation of intrinsic volumes in Kolmogorov distance are also of the order $s^{-(d-1)/(2(d+1))}$.\r\n\r\n\\begin{rema}\r\nThe results of this section prevail if we assume that the Poisson processes have underlying intensity measures $s\\mu$ for $s\\geq 0$, where $\\mu$ is a measure with a density $g:B^d(0,1)\\to[0,\\infty)$ satisfying $\\underline{c} \\leq g(x) \\leq \\overline{c}$ for all $x\\in B^d(0,1)$ and some constants $\\underline{c},\\overline{c}>0$ (see also Remark \\ref{rem:inhomogeneous_spatial_random_graphs}). Moreover, we expect that it is possible to replace the $d$-dimensional unit ball by a compact convex non-empty subset of $\\mathbb{R}^d$ with $C^2$-boundary and positive Gaussian curvature. Since the boundaries of these sets as the boundary of the unit ball are locally between two paraboloids, we believe that similar arguments as in \\cite[Subsection 3.4]{LSY19} allow to prove our results for this larger class of underlying bodies. However, we did not pursue this approach in order to not further increase the length and complexity of the proofs in this section.\r\n\\end{rema}\t\r\n\t\t\r\n\t\t\\section{Excursion sets of Poisson shot noise processes}\\label{sec:excursion_sets}\r\n\t\t\r\nExcursion sets of random fields are an important topic of probability theory and have many applications, for example in biology or engineering. For an introduction into this topic see for instance \\cite{AT07}. 
The most common underlying random fields are Gaussian random fields, but a further prominent choice are Poisson shot noise processes as we consider in this section.\t\r\n\t\t\r\n\t\tFor a stationary Poisson process $\\eta$ on $\\mathbb{R}^d$ with intensity measure $\\lambda_d$ and an integrable function $g:\\mathbb{R}^d\\to\\mathbb{R}$ let \r\n\t\t\\begin{align}\\label{equation:Poisson shot noise}\r\n\t\t\tf_{\\eta}(x)=\\sum_{y\\in\\eta}g(x-y)\r\n\t\t\\end{align}\r\n\t\tfor $x\\in\\mathbb{R}^d$. We denote $(f_\\eta(x))_{x\\in\\mathbb{R}^d}$ as Poisson shot noise process and note that it is translation invariant. Its excursion set at level $u>0$ consists of all $x\\in\\mathbb{R}^d$ such that $f_\\eta(x)\\geq u$. The corresponding volume of the excursion set in an observation window $B^d(0,s)$ with $s\\geq 1$ is given by\r\n\t\t\\begin{align*}\r\n\t\t\tF_s=\\lambda_d(\\{x\\in B^d(0,s):\tf_{\\eta}(x)\\geq u\\}).\r\n\t\t\\end{align*}\r\nNow one is interested in the behaviour of $F_s$ as $s\\to\\infty$, i.e.\\ if the observation window is increased. In \\cite{BST12} variance asymptotics and central limit theorems for the volume of excursion sets of quasi-associated random fields were considered, which include a large class of Poisson shot noise processes (see \\cite[Proposition 1]{BST12}). More recently, asymptotics for the variance and central limit theorems for the volume, the perimeter and the Euler characteristic of the excursion sets of Poisson shot-noise processes were shown in \\cite[Section 4]{L19}, while the paper \\cite{LPY20} studied the same questions for smoothed versions of volume and perimeter.\t\r\n\t\t\r\nWe use the following assumption on the kernel function $g$.\r\n\t\t\\begin{assu}\r\n\t\t\t\\label{assumption:g}\r\n\t\t\t There exist constants $\\underline{c}_g,\\overline{c}_g,\\delta,\\gamma>0$ and $c_g\\geq 1$ such that $\\delta+d/2>\\gamma\\geq\\delta>3d$ and\r\n\t\t\t \\begin{align*}\r\n\t\t\t \t \\underline{c}_g\\lVert x \\rVert^{-\\gamma}\\leq \\lvert g(x)\\rvert\\leq \\overline{c}_g\\lVert x\\rVert^{-\\delta} \r\n\t\t\t \\end{align*}\r\n\t\t for all $x\\in \\mathbb{R}^d$ with $\\lVert x\\rVert\\geq c_g$.\r\n\t\t\\end{assu}\r\n\r\nBy using our Theorem \\ref{thm:varbound}, we derive lower bounds for variances, which complement the findings from \\cite{BST12,L19}; see the discussion below for more details.\t\r\n\t\t\r\n\t\t\\begin{theorem}\r\n\t\t\t\\label{theorem:PSN process}\r\n\t\tLet $g:\\mathbb{R}^d\\to\\mathbb{R}$ be a continuous function with $g(0)>0$.\r\n\t\t\t\\begin{enumerate}\r\n\t\t\t\t\\item[a)] If $g$ fulfils Assumption \\ref{assumption:g}, there exists a constant $c>0$ such that\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\mathrm{Var}[F_s]\\geq cs^d\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\tfor $s\\geq 1$. \\label{poisson_shot_noise_a}\r\n\t\t\t\t\\item[b)] Assume that $g$ has compact support $S$.\r\n\t\t\t\tThen, there exists a constant $c>0$ such that\r\n\t\t\t\t\\begin{align*}\r\n\t\t\t\t\t\\mathrm{Var}[F_s]\\geq cs^d\r\n\t\t\t\t\\end{align*}\r\n\t\t\t\tfor $s\\geq 1$.\r\n\t\t\t\\end{enumerate}\r\n\t\t\\end{theorem}\r\n\r\nReplacing $g$ by $g(\\cdot-z)$ for any $z\\in\\mathbb{R}^d$ leads to a translation of the Poisson shot noise field and, thus, by translation invariance, to a Poisson shot noise process with the same distribution. 
Thus, the assumption $g(0)>0$ is no loss of generality because any $g$ that can take positive values can be modified accordingly, while the case of a non-positive function $g$ is trivial because then the level set for $u>0$ becomes empty.\r\n\r\nSince the volume of the excursion set can be written as integral over indicator functions, one obtains with Fubini's theorem and translation invariance of the Poisson shot noise process\r\n\\begin{align*}\r\n\\mathrm{Var}[F_s] & = \\E\\left[ \\left( \\int_{B^d(0,s)} \\mathbf{1}\\{f_\\eta(x)\\geq u\\} \\,\\mathrm{d}x \\right)^2 \\right] - \\E\\left[ \\int_{B^d(0,s)} \\mathbf{1}\\{f_\\eta(x)\\geq u\\} \\,\\mathrm{d}x \\right]^2 \\\\\r\n& = \\int_{B^d(0,s)} \\int_{B^d(0,s)} \\p( f_\\eta(x_1)\\geq u, f_\\eta(x_2)\\geq u ) - \\p( f_\\eta(x_1)\\geq u) \\p(f_\\eta(x_2)\\geq u ) \\; \\mathrm{d}x_1 \\; \\mathrm{d}x_2 \\\\\r\n& = \\int_{\\mathbb{R}^d} \\lambda_d(\\{ y\\in\\mathbb{R}^d: y,y+z\\in B^d(0,s) \\}) \\\\\r\n& \\quad \\quad \\quad \\times \\left( \\p( f_\\eta(0)\\geq u, f_\\eta(z)\\geq u ) - \\p( f_\\eta(0)\\geq u) \\p(f_\\eta(z)\\geq u ) \\right) \\; \\mathrm{d}z.\r\n\\end{align*}\r\nNote that $\\lambda_d(\\{ y\\in\\mathbb{R}^d: y,y+z\\in B^d(0,s) \\})/\\lambda_d(B^d(0,s))\\leq 1$ for all $z\\in\\mathbb{R}^d$ and that it converges to one as $s\\to\\infty$ for all $z\\in\\mathbb{R}^d$. Thus, the dominated convergence theorem yields\r\n$$\r\n\\lim_{s\\to\\infty} \\frac{\\mathrm{Var}[F_s]}{\\lambda_d(B^d(0,s))} = \\int_{\\mathbb{R}^d} \\p( f_\\eta(0)\\geq u, f_\\eta(z)\\geq u ) - \\p( f_\\eta(0)\\geq u) \\p(f_\\eta(z)\\geq u ) \\; \\mathrm{d}z\r\n$$\r\nif the integral on the right-hand side is well-defined. However, this explicit formula for the asymptotic variance does not imply the statement of Theorem \\ref{theorem:PSN process} since the difference under the integral could take both negative and positive values in such a way that the integral becomes zero.\r\n\r\nSince statements of the form that the variance is at least of the order of the volume of the observation window as in Theorem \\ref{theorem:PSN process} were already proven in \\cite[Proposition 1]{BST12} and \\cite[Theorem 4.1]{L19}, let us compare the assumptions of Theorem \\ref{theorem:PSN process} a) with those made before. In \\cite[Proposition 1]{BST12}, it is required that $g$ is a bounded and uniformly continuous function on $\\mathbb{R}^d$ with $\\lvert g(x)\\rvert\\leq c\\lVert x\\rVert^\\alpha$ for some constant $c>0$ and $\\alpha>3d$ (as in our Assumption \\ref{assumption:g}). A crucial difference is that we allow $g$ to take positive and negative values, while it has to be non-negative in \\cite{BST12}, where this assumption might be essential since it ensures that the Poisson shot noise process is positively associated. A lower bound on the decay of $|g|$ as in Assumption \\ref{assumption:g} is not present in \\cite{BST12}, but we use it only to ensure the boundedness of the density of $f_\\eta(0)$, which is supposed in \\cite{BST12}. The result in \\cite{BST12} deals with marks in the sense that in \\eqref{equation:Poisson shot noise} each summand is multiplied by an i.i.d.\\ copy of a non-negative random variable. It might be possible to generalise our results in this direction as well. The assumptions in \\cite[Theorem 4.1]{L19} seem to be more restrictive than in our case. So it is supposed that $g$ depends only on the norm of its argument and that $|g(x)|$ has an upper bound as in Assumption $1$ but with $\\delta=11d$. 
Instead a lower bound on $|g|$, a rather technical assumption (see (4.3) in \\cite{L19}) is made, which even requires differentiability of $g$. We are not aware of any results dealing with the situation of part b) of Theorem \\ref{theorem:PSN process}. The compact support implies that $f_\\eta(0)$ does not possess a density.\r\nWe prepare the proof of Theorem \\ref{theorem:PSN process} with the following lemma.\r\n\r\n\t\\begin{lemma}\r\n\t\t\\label{lemma_density}\r\n\t\tLet $g:\\mathbb{R}^d\\to\\mathbb{R}$ be a continuous, bounded function with $g(0)>0$ that fulfils Assumption \\ref{assumption:g}. Then, $f_\\eta(x)$ has a bounded density for $x\\in \\mathbb{R}^d$.\r\n\t\\end{lemma}\r\n\t\\begin{proof}\r\n\t\tWe use the fact that $f_\\eta(x)$ has a bounded density if its characteristic function $\\varphi$ is integrable.\r\n\t\tBy \\cite[Chapter 1, Lemma 3.7]{BS07} the characteristic function of $f_\\eta(x)$ is given by\r\n\t\t\\begin{align*}\r\n\t\t\t\\varphi(t)=\\exp\\left[-\\int_{\\mathbb{R}^d}1-e^{\\mathbf{i}tg(x-y)}\\;\\mathrm{d}y\\right],\r\n\t\t\\end{align*}\r\n\t\twhere $\\mathbf{i}$ is the imaginary unit. Thus, $f_\\eta(x)$ has a bounded density if \r\n\t\t\\begin{align*}\r\n\t\t\t\\int_\\mathbb{R}\\lvert\\varphi(t)\\rvert\\;\\mathrm{d}t=\\int_\\mathbb{R}\\Big\\lvert\\exp\\Big[-\\int_{\\mathbb{R}^d} 1-e^{\\mathbf{i}tg(x-y)} \\;\\mathrm{d}y\\Big]\\Big\\rvert\\;\\mathrm{d}t<\\infty.\t\r\n\t\t\\end{align*}\r\n\t\tChoose $c>0$ small enough such that $1-\\cos(\\hat{x})=\\sum_{k=1}^{\\infty}(-1)^{k+1}\\frac{{\\hat{x}}^{2k}}{(2k)!}\\geq \\frac{\\hat{x}^2}{4}$ for $\\hat{x}\\in[-c,c]$. Then it holds\r\n\t\t\r\n\t\t\\begin{align*}\r\n\t\t\t\\int_{\\mathbb{R}^d}1-\\cos(tg(x-y))\\;\\mathrm{d}y\r\n\t\t\t&\\geq\\int_{\\{z\\in\\mathbb{R}^d: t^2g(x-z)^2\\leq c^2,\\lVert x-z\\rVert\\geq c_g\\}}\\frac{(tg(x-y))^{2}}{4}\\;\\mathrm{d}y\\\\\r\n\t\t\t&\\geq \\int_{\\{z\\in\\mathbb{R}^d: t^2\\overline{c}_g^2\\lVert x-z\\rVert^{-2\\delta}\\leq c^2,\\lVert x-z\\rVert\\geq c_g\\}}\\frac{t^2\\underline{c}_g^2\\lVert x-y\\rVert^{-2\\gamma}}{4}\\;\\mathrm{d}y\\\\\r\n\t\t\t&\\geq \\frac{d\\kappa_dt^2\\underline{c}_g^2}{4}\\int_{\\max\\left\\{\\left(t\\overline{c}_g/c\\right)^{1/\\delta},c_g\\right\\}}^\\infty r^{-2\\gamma}r^{d-1}\\;\\mathrm{d}r\\\\&=\\frac{d\\kappa_dt^2\\underline{c}_g^2}{4(2\\gamma-d)}\\cdot\\max\\left\\{\\left(t\\overline{c}_g/c\\right)^{1/\\delta},c_g\\right\\}^{(d-2\\gamma)}\r\n\t\t\\end{align*}\r\n\t\tand, therefore,\r\n\t\t\\begin{align*}\r\n\t\t\t\\int_\\mathbb{R}\\lvert\\varphi(t)\\rvert\\;\\mathrm{d}t&=\\int_\\mathbb{R}\\Big\\lvert\\exp\\Big[-\\int_{\\mathbb{R}^d}1-e^{\\mathbf{i}tg(x-y)}\\;\\mathrm{d}y\\Big]\\Big\\rvert\\;\\mathrm{d}t\\\\&=2\t\\int_{\\mathbb{R}_+}\\exp\\Big[-\\int_{\\mathbb{R}^d}1-\\cos(tg(x-y))\\;\\mathrm{d}y\\Big]\\;\\mathrm{d}t\\\\\t\r\n\t\t\t&\\leq 2\\int_{\\mathbb{R}_+}\\exp\\left[-\\frac{d\\kappa_dt^2\\underline{c}_g^2}{4(2\\gamma-d)}\\cdot\\max\\left\\{\\left(t\\overline{c}_g/c\\right)^{1/\\delta},c_g\\right\\}^{(d-2\\gamma)}\\right]\\;\\mathrm{d}t\\\\\r\n\t\t\t&= 2 \\int_{0}^{c_g^{\\delta}c/{\\overline{c}_g}}\\exp[-c_{1,\\gamma,\\delta,d} t^{2}]\\;\\mathrm{d}t+ 2\\int_{c_g^{\\delta}c/{\\overline{c}_g}}^\\infty\\exp[-c_{2,\\gamma,\\delta,d}t^{(2(\\delta-\\gamma)+d)/\\delta}]\\;\\mathrm{d}t\\\\&<\\infty\r\n\t\t\\end{align*}\r\n\t\twith suitable constants $c_{1,\\gamma,\\delta,d},c_{2,\\gamma,\\delta,d}>0$ since $\\delta-\\gamma+d/2>0$. 
This shows that $f_x(\\eta)$ has a bounded density.\r\n\t\\end{proof}\r\n\t\t\\begin{proof}[Proof of Theorem \\ref{theorem:PSN process}]\r\n\t\t\tSince $g$ is continuous and $g(x)\\to0$ as $\\lVert x\\rVert\\to\\infty$, there exists a ball $B^d(\\hat{t},r)$ with centre $\\hat{t}\\in \\mathbb{R}^d$ and radius $r>0$ such that $g(t)\\in[c_1,c_2]$ for all $t\\in B^d(\\hat{t},r)$ with\r\n\t\t\t$00$ small enough such that \r\n\t\t\t\\begin{align*}\r\n\t\t\t\tu-c_3>\\frac{c_2}{c_1}(u-g(0)+2c_1+c_3).\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $k\\in\\mathbb{N}_0$ be such that $\\frac{u-g(0)+c_1+c_3}{c_1}\\leq k< \\frac{u-c_3}{c_2}$. Note that such a $k$ exists because $c_1,c_2$ and $c_3$ are chosen in such a way that $\\frac{u-g(0)+c_1+c_3}{c_1}+1=\\frac{u-g(0)+2c_1+c_3}{c_1}<\\frac{u-c_3}{c_2}$ and $\\frac{u-c_3}{c_2}>0$. Therefore, together with \\eqref{x-y in B(t,r)},\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\p(\tf_{\\eta}(x)0.\r\n\t\t\t\\end{align*}\t\r\n\r\n\t\t\tFor b) we define $\\tilde{S}=\\{y\\in \\mathbb{R}^d:x-y\\in S\\; \\text{for some } x\\in B^d(z,\\varepsilon)\\}$. Note that $B^d(z-\\hat{t},r-\\varepsilon)\\subseteq\\tilde{S}$ because $x-y\\in B^d(\\hat{t},r)\\subseteq S$ for all $x\\in B^d(z,\\varepsilon)$ and $y\\in B^d(z-\\hat{t},r-\\varepsilon)$. Then it follows\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\p(\tf_{\\eta}(x)0$ small enough such that \t\r\n\t\t\t\\begin{align*}\r\n\t\t\t\tu>\\frac{c_2}{c_1}(u-g(0)+2c_1).\r\n\t\t\t\\end{align*}\r\n\t\t\tLet $k\\in\\mathbb{N}_0$ be such that $\\frac{u-g(0)+c_1}{c_1}\\leq k<\\frac{u}{c_2}$. Note that such a $k$ exists because $c_1$ and $c_2$ are chosen in such a way that $\\frac{u-g(0)+c_1}{c_1}+1=\\frac{u-g(0)+2c_1}{c_1}<\\frac{u}{c_2}$ and $\\frac{u}{c_2}>0$. Then,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t&\\p(\tf_{\\eta}(x)0.\r\n\t\t\t\\end{align*}\r\n\t\t\tAltogether, for $A_s=\\{z\\in \\mathbb{R}^d:B^d(z,\\varepsilon)\\subset B^d(0,s)\\}$ and $p=p_a$ in case of a) or $p=p_b$ in case of b) we conclude that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t\\E\\left[\\int(D_zF_s)^2\\;\\mathrm{d}z\\right]&\\geq \\kappa_{d}^2\\varepsilon^{2d}\\int_{\\mathbb{R}^d}\\p(D_zF_s\\geq\\kappa_d\\varepsilon^d)\\;\\mathrm{d}z\\\\&\\geq \\kappa_{d}^2\\varepsilon^{2d}\\int_{A_s}\\p(\tf_{\\eta}(x)0$.\r\n\t\t\t\r\n\t\t\tIn the following we consider the second-order difference operator to check \\eqref{condition}. For $z_1,z_2\\in\\mathbb{R}^d$ with $z_1\\neq z_2$ we have\r\n\t\t\t$$\r\n\t\t\tD^2_{z_1,z_2}F_s = \\int_{B^d(0,s)} D^2_{z_1,z_2}\\mathbf{1}\\{f_\\eta(x)\\geq u\\} \\;\\mathrm{d}x\r\n\t\t\t$$\r\n\t\t\tso that\r\n\t\t\t\\begin{equation}\\label{eqn:D2_F_s}\r\n\t\t\t|D^2_{z_1,z_2}F_s| \\leq 2 \\lambda_d(B_s(z_1,z_2))\r\n\t\t\t\\end{equation}\r\n\t\t\twith $B_s(z_1,z_2)=\\{x\\in B^d(0,s): D^2_{z_1,z_2}\\mathbf{1}\\{f_\\eta(x)\\geq u\\}\\neq 0 \\}$, where we used the bound $|D^2_{z_1,z_2}\\mathbf{1}\\{f_\\eta(x)\\geq u\\}|\\leq 2$. The inequality \\eqref{eqn:D2_F_s} leads to\r\n\t\t\t$$\r\n\t\t\tI:=\\E\\left[\\int_{\\mathbb{R}^d}\\int_{\\mathbb{R}^d}(D^2_{z_1,z_2}F_s)^2\\;\\mathrm{d}z_1\\;\\mathrm{d}z_2\\right] \\leq 4 \\int_{\\mathbb{R}^d}\\int_{\\mathbb{R}^d}\\E\\left[\\lambda_d(B_s(z_1,z_2))^2\\right]\\;\\mathrm{d}z_1\\;\\mathrm{d}z_2.\r\n\t\t\t$$\r\n\t\t\t\r\n\t\t\tFirst we study the situation of a). Let $x\\in B^d(0,s)$ and assume that $|g(x-z_2)|\\leq |g(x-z_1)|$. 
Since\r\n\t\t\t\\begin{align*}\r\n\t\t\tD^2_{z_1,z_2}\\mathbf{1}\\{f_\\eta(x)\\geq u\\} & = \\mathbf{1}\\{f_\\eta(x)+g(x-z_1)+g(x-z_2)\\geq u\\} - \\mathbf{1}\\{f_\\eta(x)+g(x-z_1)\\geq u\\} \\\\\r\n\t\t\t& \\quad - ( \\mathbf{1}\\{f_\\eta(x)+g(x-z_2)\\geq u\\} - \\mathbf{1}\\{f_\\eta(x)\\geq u\\} ),\r\n\t\t\t\\end{align*}\r\n\t\t\twe obtain that\r\n\t\t\t$$\r\n\t\t\tD^2_{z_1,z_2}\\mathbf{1}\\{f_\\eta(x)\\geq u\\} = 0\r\n\t\t\t$$\r\n\t\t\tif\r\n\t\t\t$$\r\n\t\t\tf_\\eta(x)+g(x-z_1) \\notin [u-|g(x-z_2)|,u+|g(x-z_2)|]\r\n\t\t\t$$\r\n\t\t\tand\r\n\t\t\t$$\r\n\t\t\tf_\\eta(x) \\notin [u-|g(x-z_2)|,u+|g(x-z_2)|].\r\n\t\t\t$$\r\n\t\t\tTogether with the fact that the density of $f_\\eta(x)$ is bounded by a constant $C_1>0$, which was shown in Lemma \\ref{lemma_density}, we derive\r\n\t\t\t\\begin{align*}\r\n\t\t\t\\p(x\\in B_s(z_1,z_2)) & \\leq \\p( f_\\eta(x)+g(x-z_1) \\in [u-|g(x-z_2)|,u+|g(x-z_2)|] ) \\\\\r\n\t\t\t& \\quad + \\p(f_\\eta(x) \\in [u-|g(x-z_2)|,u+|g(x-z_2)|]) \\\\\r\n\t\t\t& \\leq 4 C_1 |g(x-z_2)|.\r\n\t\t\t\\end{align*}\r\n\t\t\tUsing the same arguments for $|g(x-z_2)|\\geq |g(x-z_1)|$, we deduce\r\n\t\t\t$$\r\n\t\t\t\\p(x\\in B_s(z_1,z_2)) \\leq 4 C_1 \\min\\{|g(x-z_1)|,|g(x-z_2)|\\}\r\n\t\t\t$$\r\n\t\t\tso that with H\\\"older's inequality and the inequality $\\min\\{a,b\\}\\leq \\sqrt{a}\\sqrt{b}$ for $a,b\\geq 0$,\r\n\t\t\t\\begin{align*}\r\n\t\t\t\t& \\E\\left[\\lambda_d(B_s(z_1,z_2))^2\\right] \\\\\r\n\t\t\t\t&=\\int_{B^d(0,s)}\\int_{B^d(0,s)}\\p(x_1\\in B_s(z_1,z_2),x_2\\in B_s(z_1,z_2))\\;\\mathrm{d}x_1\\;\\mathrm{d}x_2\\\\&\\leq \\int_{B^d(0,s)}\\int_{B^d(0,s)} \\p(x_1\\in B_s(z_1,z_2))^{2/3}\\p(x_2\\in B_s(z_1,z_2))^{1/3}\\;\\mathrm{d}x_1\\;\\mathrm{d}x_2\\\\\r\n\t\t\t\t&\\leq 4C_1 \\int_{B^d(0,s)}\\int_{B^d(0,s)}\\lvert g(x_1-z_1)\\rvert^{1/3}\\lvert g(x_1-z_2)\\rvert^{1/3}\\lvert g(x_2-z_1)\\rvert^{1/3}\\;\\mathrm{d}x_1\\;\\mathrm{d}x_2.\r\n\t\t\t\\end{align*}\r\n\t\t\tFrom Assumption \\ref{assumption:g} and the continuity of $g$ it follows that $g$ is bounded by a constant $C_2>0$. Using the decay of $|g|$ and $\\delta>3d$ in Assumption \\ref{assumption:g}, we have for $x\\in B^d(0,s)$ that\r\n\t\t\t\\begin{align*}\r\n\t\t\t\\int_{\\mathbb{R}^d} \\lvert g(x-z)\\rvert^{1/3}\\;\\mathrm{d}z&=\t\\int_{\\mathbb{R}^d\\backslash B^d(x,c_g)} \\lvert g(x-z)\\rvert^{1/3}\\;\\mathrm{d}z+\\int_{B^d(x,c_g)} \\lvert g(x-z)\\rvert^{1/3}\\;\\mathrm{d}z\\\\\r\n\t\t\t&\\leq \\int_{\\mathbb{R}^d\\backslash B^d(x,c_g)} \\overline{c}_g^{1/3}\\lVert x-z\\rVert^{-\\delta/3}\\;\\mathrm{d}z+C_2^{1/3}\\kappa_{d}c_g^d\\\\\r\n\t\t\t&= d\\kappa_{d}\\overline{c}_g^{1/3}\\int_{c_g}^\\infty r^{d-1}r^{-\\delta/3}\\;\\mathrm{d}r+C_2^{1/3}\\kappa_{d}c_g^d\\\\\r\n\t\t\t&=d\\kappa_{d}\\overline{c}_g^{1/3}\\frac{c_g^{d-\\delta/3}}{\\delta/3-d}+C_2^{1/3}\\kappa_{d}c_g^d=:C_3.\r\n\t\t\t\\end{align*}\r\n\t\t\t\t\t\tThe same estimate holds for $\\int_{B^d(0,s)} \\lvert g(x-z)\\rvert^{1/3}\\;\\mathrm{d}x$ for $z\\in\\mathbb{R}^d$. 
Hence,\r\n\t\t\t\begin{align*}\r\n\t\t\tI&\leq \int_{\mathbb{R}^d}\int_{\mathbb{R}^d}16 C_1 \int_{B^d(0,s)}\int_{B^d(0,s)}\lvert g(x_1-z_1)\rvert^{1/3}\lvert g(x_1-z_2)\rvert^{1/3}\\&\hspace{6.5cm}\times\lvert g(x_2-z_1)\rvert^{1/3}\;\mathrm{d}x_1\;\mathrm{d}x_2\;\mathrm{d}z_1\;\mathrm{d}z_2\\\r\n\t\t\t\t&=16 C_1 \int_{B^d(0,s)}\int_{\mathbb{R}^d}\lvert g(x_1-z_1)\rvert^{1/3}\int_{B^d(0,s)}\lvert g(x_2-z_1)\rvert^{1/3}\\&\hspace{6.5cm}\times\int_{\mathbb{R}^d}\lvert g(x_1-z_2)\rvert^{1/3} \r\n\t\t\t\t\;\mathrm{d}z_2\;\mathrm{d}x_2\;\mathrm{d}z_1\;\mathrm{d}x_1\\\r\n\t\t\t\t&\leq 16 C_1 \int_{B^d(0,s)}C_3^3\;\mathrm{d}x_1=: \tilde{c}_1s^d.\r\n\t\t\t\end{align*}\r\n\t\t\t\r\n\t\t\tFor b), let $\widetilde{R}>0$ be such that $S\subseteq B^d(0,\widetilde{R})$ and let $z_1,z_2\in\mathbb{R}^d$. Then, since\r\n\t\t\t\t\begin{align*}\r\n\t\t\t\t\tB_s(z_1,z_2) \subseteq \{x\in B^d(0,s):\lVert x-z_1\rVert\leq \widetilde{R},\lVert x-z_2\rVert\leq \widetilde{R}\},\r\n\t\t\t\t\end{align*}\r\n\t\t\tit follows that\r\n\t\t\t\begin{align*}\r\n\t\t\t\E\left[\lambda_d(B_s(z_1,z_2))^2\right] \leq \lambda_d(\{x\in B^d(0,s):\lVert x-z_1\rVert\leq \widetilde{R},\lVert x-z_2\rVert\leq \widetilde{R}\})^2.\r\n\t\t\t\end{align*}\r\n\t\t\tThe triangle inequality implies $\lambda_d(\{x\in B^d(0,s):\lVert x-z_1\rVert\leq \widetilde{R},\lVert x-z_2\rVert\leq \widetilde{R}\})=0$ for $\lVert z_1-z_2\rVert>2\widetilde{R}$ or $\lVert z_2\rVert> s+\widetilde{R}$ and therefore\r\n\t\t\t\begin{align*}\r\n\t\t\tI &\leq 4 \int_{B^d(0,s+\widetilde{R})}\int_{B^d(z_2,2\widetilde{R})} \lambda_d(\{x\in B^d(0,s):\lVert x-z_1\rVert\leq \widetilde{R},\lVert x-z_2\rVert\leq \widetilde{R}\})^2\;\mathrm{d}z_1\;\mathrm{d}z_2\\&\leq 4\int_{B^d(0,s+\widetilde{R})}\int_{B^d(z_2,2\widetilde{R})} (\kappa_d\widetilde{R}^d)^2\;\mathrm{d}z_1\;\mathrm{d}z_2\leq 4(\kappa_d\widetilde{R}^d)^2\kappa_d^2(2\widetilde{R})^d(s+\widetilde{R})^d\\&\leq Cs^d\r\n\t\t\t\end{align*}\r\n\t\tfor a suitable constant $C>0$, which, together with Theorem \ref{thm:varbound}, completes the proof.\r\n\t\t\end{proof}\r\n\r\n\section*{Chain of Josephson junctions}\nIn the following, we consider a chain of Josephson junctions arranged as $N \in \mathbb{N}$ superconducting loops, see Fig.~\ref{Fig1}a. \nEach loop contains two Josephson junctions with Josephson energies $E_{J_1}$ and $E_{J_2}$, and is threaded by a piece-wise increasing flux $\varphi_n = \dot{\varphi}_x t + n \varphi_{\text{off}}$ in the $n$-th loop.\nAdditionally, a voltage $V_y = \hbar \dot{\varphi}_y / (2e)$ is applied to the chain, inducing an additional flux at the boundary of the circuit. \nIn general, such superconducting circuits are described by the Hamiltonian $H = E_{C} (\hat{\bm{n}}-\bm{n}_g) \bm{c}^{-1} (\hat{\bm{n}}-\bm{n}_g) + U(\hat{\bm{\phi}},\varphi_n)$ \cite{devoret1995quantum,Vool2017}, \nwhere $E_C = (2e)^2/C$ is the charging energy, which we assume to be homogeneous for simplicity, i.e., $E_{C_j} \equiv E_C$ for all junctions $j$. \nFurthermore, $\hat{\bm{n}} = (n_1,...,n_{N-1})$ are the node charges of the circuit, where $\bm{n}_g = \bm{C}_g \bm{V}_g / (2e)$ is the number of gate charges.
The latter is controlled by capacitively coupling the nodes to gate voltages $\bm{V}_g$ with gate capacitances $\bm{C}_g$. The inverse dimensionless capacitance matrix $\bm{c}^{-1}$ and $U(\hat{\bm\phi},\varphi_n)$ summarize the Josephson energies of the circuit; see the Methods section for further details.\n\nImportantly, in the limit of $E_C \gg E_{J_i}$, the areas between the superconducting loops act as superconducting islands that are controlled by gate charges $\bm{n}_g$. \nDue to the capacitive coupling between neighboring islands, the charging energy $E_C$ plays the role of the interaction strength between Cooper pairs on those islands, whereas $\bm{c}^{-1}$ determines the length scale of this interaction. The latter decreases linearly for the circuit shown in Fig.~\ref{Fig1}a.\nConsequently, for $\bm{n}_g = \frac{1}{N}$, states with zero and a single excess Cooper pair on the islands are charge-degenerate and allow for a finite supercurrent through the chain. \nThese low-energy states are described by the Hamiltonian\n\begin{multline}\nH_0\n=\n\sum_{n=1}^{N} \left(\frac{E_{J_2}}{2}+\frac{E_{J_1}}{2}e^{i(\dot{\varphi}_xt+n\varphi_{\text{off}})}\right) \ket{n-1}\bra{n} \n\\\n+ e^{-i\dot{\varphi}_yt} \left(\frac{E_{J_2}}{2}+\frac{E_{J_1}}{2}e^{i(\dot{\varphi}_x t+N\varphi_{\text{off}})}\right) \ket{N}\bra{0} +\text{h.c.} ,\n\label{eq:model}\n\end{multline}\nwhere $\ket{n}$ is the state with a single Cooper pair on the $n$-th island. \nIn this limit, the excess Cooper pair can move between different islands with a hopping amplitude that depends on the applied fluxes and the Josephson energies $E_{J_i}$. \nExcited states of higher energy with two or more Cooper pairs on the islands are gapped out by an energy gap proportional to $E_C$. \n\n\subsection*{Fractional transconductance}\nAs we shall see in the following, the voltage biases $2eV_\ell = \hbar \dot{\varphi}_\ell$ ($\ell = x,y$) linearly scan the fluxes over time, leading to topological pumping of the Cooper pairs. The quantization of the pumped charge will depend on the offset flux $\varphi_{\text{off}}$.\nThe main result of this work is the manifestation of a fractional quantization of the transconductance across the device\n\begin{align}\n\t\braket{G_{yx}}_{T \rightarrow \infty} = \frac{C_{xy}}{\nu} \, G_0 \n\t\label{eq:trans}\n\end{align} \nin the long-time average, as shown in Fig.~\ref{Fig1}b. \nHere, $G_0 = 4e^2 / h$ is the conductance quantum for superconducting systems and $C_{xy}=1$ is the first Chern number of the total ground-state band.\n\nTo understand the origin of such fractional quantization, let us first qualitatively look at the ground state of the chain, which has a particular form at certain points of the offset flux $\varphi_{\text{off}} = 2\pi\nu / N$ for $\nu \in \mathbb{N}$, see Fig.~\ref{Fig2}. \nAt $\nu = 1$, there is a single ground state with a single Cooper pair localized in the chain, see Fig.~\ref{Fig1}c. \nThe spatial location of the Cooper pair is controlled by the flux $\varphi_x$, which is linearly changed in time via the voltage bias $V_x$. This leads to an adiabatic transfer of this Cooper pair along the chain as long as $2eV_x = \hbar \dot{\varphi}_x$ is small with respect to the gap $E_{\text{inter}}$ to the excited states, i.e., $2eV_x \ll E_{\text{inter}}$.
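To make Eq.~\eqref{eq:model} concrete, the following minimal Python/NumPy sketch (not part of the original text; the helper name \texttt{h0} and the parameter choice $E_{J_2}=2E_{J_1}$ are ours) builds the hopping matrix over the states $\ket{0},\dots,\ket{N}$ at fixed fluxes and diagonalizes it, so that the ground-state weight per island can be inspected for different $\nu$:
\begin{verbatim}
import numpy as np

def h0(N, phi_x, phi_y, phi_off, EJ1=1.0, EJ2=2.0):
    """Hopping Hamiltonian of Eq. (eq:model) at fixed fluxes, written as an
    (N+1) x (N+1) matrix over the single-Cooper-pair states |0>, ..., |N>."""
    H = np.zeros((N + 1, N + 1), dtype=complex)
    for n in range(1, N + 1):
        H[n - 1, n] = EJ2 / 2 + EJ1 / 2 * np.exp(1j * (phi_x + n * phi_off))
    # the boundary term additionally picks up the flux phi_y
    H[N, 0] = np.exp(-1j * phi_y) * (EJ2 / 2
                                     + EJ1 / 2 * np.exp(1j * (phi_x + N * phi_off)))
    return H + H.conj().T

N, nu = 12, 2
vals, vecs = np.linalg.eigh(h0(N, phi_x=0.0, phi_y=0.0, phi_off=2 * np.pi * nu / N))
print("lowest eigenvalues:", np.round(vals[:3], 3))
print("ground-state weight per island:", np.round(np.abs(vecs[:, 0])**2, 3))
\end{verbatim}
Sweeping \texttt{phi\_x} in small steps mimics the linear ramp $\varphi_x=\dot{\varphi}_x t$ and lets one follow the transported weight for $\nu=1,2,3$.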
\nIncreasing the offset flux to $\\nu = 2$, the Cooper pair in the ground-state is delocalized on two distinct locations in the chain, see Figs.~\\ref{Fig1}c and~\\ref{Fig2}a. \nAs a function of the linearly changing $\\varphi_x$, the delocalized Cooper pair is again transported across the chain.\nHowever, this time and in contrast to the $\\nu = 1$ case, the Cooper pair is only transported half the distance due to its bipartite delocalization. \nSimilarly, for $\\nu = 3$, the ground-state Cooper pair is delocalized at three distinct locations in the chain and the pumped charge is only a third compared to the case $\\nu = 1$.\nThis adiabatic transport is connected with the topological properties of the ground-state band by applying two small incommensurate voltages $V_x = \\hbar\\dot{\\varphi}_x / (2e) = r V_y$ in the spirit of the integer case proposed in Ref.~\\cite{riwar2016multi}, where $r \\in \\mathbb{R}\\setminus\\mathbb{Q}$.\n\\begin{figure*} \n\t\\includegraphics[width=0.75\\textwidth]{images/Fig2new.png}\n\n\t\\caption{\n\t\t\\textbf{a}, Ground-state band and first excited states [cf.~Eq.~\\eqref{eq:model}] for $\\varphi_{\\text{off}} = 2\\pi \\nu / N$ with $\\nu = 2$, $N=12$, $E_{J_2} = 2E_{J_1}$, and $\\varphi_x=0$. \n\t\t%\n\t\tThere is an energy gap $E_{\\text{inter}}$ between the ground-state band and other excited states. \n\t\t%\n\t\tBy changing $\\varphi_{\\text{off}}$ from $\\nu = 2$ to $\\nu =3$, one of the excited states is pushed toward the ground-state band (see arrow and panel \\textbf{c}). \n\t\t%\n\t\t\\textbf{b}, Zoom in on the ground-state band for $\\nu = 2$ as a function of $\\varphi_x$ and $\\varphi_y$ with the same parameters as in panel \\textbf{a}.\n\t\t%\n\t\tThe adiabatic state evolution is indicated by the black arrow for finite voltages $V_\\ell = \\hbar \\dot{\\varphi}_\\ell / (2e)$. \n\t\t%\n\t\t\\textbf{c}, Same as panel \\textbf{b} for $\\nu = 3$. \n\t\t%\n\t\tAn additional energy level moved from the excited states toward the ground-state band leading to a $6\\pi$-periodicity of the whole band. The width of the lowest band $E_{\\text{width}}$ increases with increasing $\\nu$.\n\t\t%\n\t\n\t\t%\n\t\t\\textbf{d}, Applying incommensurate voltages $V_x$ and $V_y$ leads to a non-periodic time evolution $(\\varphi_x(t) , \\varphi_y(t) )$ for $t \\in [0,T]$ that covers the whole torus for long times $T \\to \\infty$.\n\t\n\t\t}\n\t\\label{Fig2}\n\\end{figure*}\n\n\nTo understand this result on a quantitative level, we first have a closer look on the specific energy spectrum for an offset flux $\\varphi_{\\text{off}} = 2\\pi\\nu / N$. \nFor $\\nu=1$, there is a single ground state and the instantaneous supercurrent through the chain to first order in the applied voltages is given by $I = 2e \\, \\partial_{\\varphi_y} E_0 /\\hbar + 2e \\dot{\\varphi}_x F_{xy}^0$.\nHere, $F_{xy}^0 = i \\braket{\\partial_{\\varphi_x}\\psi^0|\\partial_{\\varphi_y}\\psi^0} - i\\braket{\\partial_{\\varphi_y}\\psi^0|\\partial_{\\varphi_x}\\psi^0}$ is the Berry curvature of the ground state $\\ket{\\psi^0}$. \nBy applying two incommensurate voltages $V_x$ and $V_y$, the ground state $\\ket{\\psi^0(\\varphi_x,\\varphi_y)}$ adiabatically evolves in the periodic space of fluxes $(\\varphi_x,\\varphi_y)$ according to the second Josephson relation $2e V_\\ell = \\hbar\\dot{\\varphi}_\\ell$. 
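The Chern number entering this argument can be checked numerically. The sketch below (ours; it reuses the helper \texttt{h0} defined above and the standard discretized-link construction) sums the Berry curvature of the $\nu$ lowest states over a grid on the $(\varphi_x,\varphi_y)$ torus; for $\varphi_{\text{off}}=2\pi\nu/N$ it should return a value close to the integer $C_{xy}$ quoted above.
\begin{verbatim}
import numpy as np

def chern_lowest_band(N=12, nu=2, n_grid=24, EJ1=1.0, EJ2=2.0):
    """Chern number of the nu lowest states of h0 over the (phi_x, phi_y)
    torus, via the discretized link (plaquette) construction."""
    phi_off = 2 * np.pi * nu / N
    ks = np.linspace(0.0, 2 * np.pi, n_grid, endpoint=False)
    frames = np.empty((n_grid, n_grid, N + 1, nu), dtype=complex)
    for i, px in enumerate(ks):
        for j, py in enumerate(ks):
            _, vecs = np.linalg.eigh(h0(N, px, py, phi_off, EJ1, EJ2))
            frames[i, j] = vecs[:, :nu]          # frame of the lowest band

    def link(a, b):                              # U(1) link between two frames
        d = np.linalg.det(a.conj().T @ b)
        return d / abs(d)

    total = 0.0
    for i in range(n_grid):
        for j in range(n_grid):
            a = frames[i, j]
            b = frames[(i + 1) % n_grid, j]
            c = frames[(i + 1) % n_grid, (j + 1) % n_grid]
            d = frames[i, (j + 1) % n_grid]
            # Berry flux through one plaquette of the flux grid
            total += np.angle(link(a, b) * link(b, c) * link(c, d) * link(d, a))
    return total / (2 * np.pi)

print(chern_lowest_band(N=12, nu=2))
\end{verbatim}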
\nAs a result, the time-averaged current through the chain after long averaging times $T \\to \\infty$ is given by an average over the fluxes $\\braket{I}_{T \\rightarrow \\infty} = \\braket{I}_{\\varphi_x,\\varphi_y}$.\nSince $\\braket{\\partial_{\\varphi_y} E_0}_{\\varphi_y} = 0$, the only contribution is given by the Berry curvature $\\braket{F_{xy}^0}_{\\varphi_x, \\varphi_y}$, which leads to the integer-quantized transconductance $\\braket{G_{yx}}_{T \\rightarrow \\infty} = G_0 C_{xy}$ in terms of the ground-state Chern number $C_{xy} = \\iint_0^{2\\pi} F_{xy}^0 d\\varphi_x d\\varphi_y/ (2\\pi) = 1$, cf.~Ref.~\\cite{riwar2016multi}.\n\nFor $\\nu=2$, there are two states in the lowest band with a $4\\pi$-periodicity in the $\\varphi_y$-direction, see Fig.~\\ref{Fig2}a. \nThe Chern number of this band is given by $C_{xy} = \\iint_0^{2\\pi} \\text{tr}(F_{xy}^0) d\\varphi_x d\\varphi_y /(2\\pi) =1$, where $[F_{xy}^0]_{mm} = i \\braket{\\partial_{\\varphi_x}\\psi_m^0|\\partial_{\\varphi_y}\\psi_m^0}-i \\braket{\\partial_{\\varphi_y}\\psi_m^0|\\partial_{\\varphi_x}\\psi_m^0}$ is the Berry curvature of the $m$-th eigenstate $\\ket{\\psi_m^0}$ of the lowest band. \nApplying again a pair of incommensurate voltages, this state will adiabatically evolve in the periodic space of $(\\varphi_x,\\varphi_y)$ in the lowest band, see Figs.~\\ref{Fig2}b and \\ref{Fig2}d. \nHence, due to the increased $4\\pi$-periodicity along the $\\varphi_y$-axis, there will be an additional factor $1/2$ in the time-averaged current that leads to the fractional transconductance $\\braket{G_{yx}}_{\\varphi_x,\\varphi_y} = C_{xy} G_0/2$. \nBy further increasing the offset flux to $\\nu=3$, another state will be pushed toward the lowest energy band such that there are three states with a $6\\pi$-periodicity along the $\\varphi_y$-axis, compare Figs.~\\ref{Fig2}b and \\ref{Fig2}c. \nAgain, this increased periodicity leads to an additional factor $1/3$ in the time-averaged transconductance. \nOverall, by tuning the offset flux $\\varphi_{\\text{off}}$ such that $\\nu \\to \\nu + 1$, an additional state will be pushed toward the lowest energy band, such that there are $\\nu+1$ states in the lowest band with a $2\\pi(\\nu+1)$ periodicity in $\\varphi_y$-direction. \nCorrespondingly, the time-averaged transconductance at the particular points $\\varphi_{\\text{off}} = 2 \\pi \\nu / N$ yields the quantized fractional values in the adiabatic limit , cf.~Eq.~\\eqref{eq:trans} and see the Methods section for the derivation.\nThus, the transconductance of the ground-state band is directly related to the topology of the band and the $2\\pi\\nu$-periodicity of the state evolution that is controlled by the offset flux $\\varphi_\\text{off}$ in the chain, see Fig.~\\ref{Fig1}b for finite voltages.\n\n\n\t\\begin{figure*}\n\t\\includegraphics[width=0.75\\textwidth]{images/Fig3new.png}\n\t\\caption{\n\t\t\\textbf{a}, Lowest band for $\\varphi_{\\text{off}} = 3.9 \\, \\pi / N$, $N=12$, $E_{J_2} = 2E_{J_1}$, and $\\varphi_x=\\pi/2$. \n\t\t%\n\t\tAt $\\varphi_{\\text{off}} \\neq 2 \\pi \\nu/ N$, small energy gaps $E_{\\text{intra}}$ open between the individual states within the ground-state band. 
\n\t\t%\n\t\tAs long as $2eV \\gg E_{\\text{intra}}$ with $V \\equiv \\sqrt{V_x^2+V_y^2}$, the state evolution will diabatically jump over these gaps (marked by dots) and maintain the $2\\pi\\nu$-periodicity (provided that $2eV \\ll E_{\\text{inter}}$, cf.~Fig.~\\ref{Fig2}a).\n\t\t%\n\t\t\\textbf{b}, Transconductance $\\langle G_{yx} \\rangle_{T} = \\langle I \\rangle_{T} / V_x$ in units of $G_{0} = 4e^2/h$ as a function of the applied voltage $V = \\sqrt{V_x^2 + V_y^2}$ averaged over the time $T = 200 \\, h / (2eV_y)$ for $\\varphi_{\\text{off}} = 3.36\\,\\pi / N$, $N=12$, $2E_{J_1} = E_{J_2}$, and $V_x = -\\pi V_y$. \n\t\t%\n\t\tWe observe a quantized plateau only in the region where the condition $E_{\\text{intra}}(\\varphi_{\\text{off}}) \\ll 2eV \\ll E_{\\text{inter}}(\\varphi_{\\text{off}})$ is fulfilled, while the fractional quantization vanishes for too large (small) voltages $2eV \\sim E_{\\text{inter}}$ $(2eV\\sim E_{\\text{intra}})$.\n\t\t%\n\t\t\\textbf{c} and \\textbf{d}, Standard deviation of the transconductance $\\delta G_\\nu$ (see Fig.~\\ref{Fig1}b) with respect to the quantized values $G_0/\\nu$ as a function of the disorder strength $\\delta$.\n\t\t%\n\t\t$\\delta G_\\nu$ is determined in the range of $\\varphi_{\\text{off}}$ where quantized plateaus would appear in the case without disorder. \n\t\t%\n\t\tThe transconductance is averaged over the time $T = 200 \\, h/ (2eV_y)$ for $N=18$, $2E_{J_1} = E_{J_2}$, and $V_x=-2\\pi V_y$, as in Fig.~\\ref{Fig1}b. \n\t\t%\n\t\n\t\n\t\t%\n\t\n\t\n\t\n\t}\n\t\\label{Fig3}\n\\end{figure*}\n\nCrucially, we observe quantization plateaus in Fig.~\\ref{Fig1}b, which cannot be ascribed to isolated $\\varphi_{\\text{off}}$ points. Moreover, we consider finite applied voltage biases, which break away from the adiabatic limit that we considered thus far.\nHence, the formation of these plateaus can be only understood beyond the adiabatic approximation. \nIn particular, for $\\varphi_{\\text{off}} \\neq 2\\pi\\nu / N$, there are intraband gaps appearing within the lowest energy band, as shown in Fig.~\\ref{Fig3}a for $\\varphi_{\\text{off}} = 3.9\\pi / N$.\nThe size of these intraband gaps $E_{\\text{intra}}$ generally depends on the detuning of $\\varphi_{\\text{off}}$ away from $\\varphi_{\\text{off}} = 2\\pi\\nu / N$, i.e., away from the locations where the intraband gaps vanish. \nThese intraband gaps break the increased $2\\pi\\nu$-periodicity down to a regular $2\\pi$-periodicity in the full adiabatic limit. \nHowever, for finite voltages and small intraband gaps, the increased $2\\pi\\nu$-periodicity can still be recovered for sufficiently diabatic state evolution~\\cite{fedorova2020observation}.\n\n\nIn particular, in the limit $E_{\\text{intra}} \\ll 2eV \\ll E_{\\text{inter}}$ with $V \\equiv \\sqrt{V_x^2+V_y^2}$, there will be diabatic Landau-Zener transitions at the avoided crossings within the lowest band, as illustrated in Fig.~\\ref{Fig3}a. \nThen, the Landau-Zener probability $P_{LZ} \\sim \\exp[ -\\pi E_{\\text{intra}}^2 / (2\\hbar\\alpha)]$ determines the probability to jump over an avoided crossing \\cite{zener1932non}, where $\\alpha$ is the slew rate that is proportional to the applied voltage and determines how fast the avoided crossing is traversed.\nHence, $P_{LZ}\\approx 1$ for $E_{\\text{intra}}\\ll2eV\\ll E_{\\text{inter}}$ and we restore the $2\\pi\\nu$-periodicity of the lowest band for detuned $\\varphi_{\\text{off}}$, which ultimately leads to fractional transconductance plateaus. 
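As a rough numerical illustration of this window (ours; the gap values and the identification of the slew rate $\alpha$ with a single drive strength are placeholders, since the text only states that $\alpha$ is proportional to the applied voltage), one can simply evaluate the Landau--Zener expression for a few drive strengths:
\begin{verbatim}
import numpy as np

def p_lz(e_intra, alpha, hbar=1.0):
    """Landau-Zener probability exp[-pi E_intra^2 / (2 hbar alpha)] of jumping
    across an avoided crossing of size E_intra traversed at slew rate alpha."""
    return np.exp(-np.pi * e_intra**2 / (2 * hbar * alpha))

e_intra, e_inter = 0.01, 0.5        # illustrative gaps, in units of E_J1
for alpha in [1e-5, 1e-4, 1e-3, 1e-2]:
    print(f"alpha = {alpha:7.0e}   P_LZ = {p_lz(e_intra, alpha):.3f}")
# a fractional plateau needs P_LZ close to 1 (diabatic w.r.t. E_intra) while
# the drive remains slow compared to E_inter (2eV << E_inter in the text)
\end{verbatim}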
\nNote, however, that when the applied voltage for a detuned $\\varphi_{\\text{off}}$ becomes too large, $2eV\\sim E_{\\text{inter}}$, we will likely observe transitions to excited states within the next energy band that will destroy the quantization of the transconductance. \nIn the other limit, when the applied voltage becomes comparable to the intraband gap, $2eV \\sim E_{\\text{intra}}$, $P_{LZ} < 1$, adiabaticity yields that there is no $2\\pi\\nu$-periodicity for the state evolution. \nThese two limits can be observed in Fig.~\\ref{Fig3}b, where we show the time-averaged transconductance as a function of the voltage bias for a detuned $\\varphi_{\\text{off}}$.\nThus, clear fractional transconductance plateaus are visible only in regions in which the condition $E_{\\text{intra}} \\ll 2eV \\ll E_{\\text{inter}}$ is fulfilled. \nTo summarize, the formation of plateaus that are observable in Fig.~\\ref{Fig1}b are the result of finite voltages and non-adiabatic Landau-Zener transitions that lead to an increased periodicity of the state evolution for detuned values of the offset flux near $\\varphi_{\\text{off}} \\approx 2\\pi\\nu / N$.\n\nNote that there are no plateaus for values $\\varphi_{\\text{off}} \\approx 2\\pi\\nu / N$ if the inter- and intraband gaps are comparable, $E_{\\text{intra}} \\sim E_{\\text{inter}}$, which, for instance, is the case in the example shown in Fig.~\\ref{Fig1}b for $\\nu > 3$ with $N = 18$. \nIn general, the amount of observable plateaus increases with the length $N$ of the chain since the width of the lowest band $E_{\\text{width}}$, indicated in Fig.~\\ref{Fig2}c, decreases with $N$.\nAt the same time, however, the plateaus' widths decrease with increasing $N$ because the transitions between different plateaus $\\nu \\rightarrow \\nu+1$ are in a smaller interval of $\\varphi_{\\text{off}}$ for larger $N$, such that one would observe rather sharp quantized peaks in the transconductance for long chains.\n\n\\subsection*{Robustness to disorder}\nIn addition to the stability of the effect with respect to the offset flux $\\varphi_{\\text{off}}$, it is also crucial that it is stable under disorder along the superconducting chain. \nPossible sources can be, for instance, non-uniform Josephson energies along the chain or additional disorder in the fluxes of the superconducting loops. \nIn the following, we will analyze the robustness of the quantized transconductance with respect to flux disorder $\\varphi_n = \\dot{\\varphi}_x t + n \\varphi_{\\text{off}} + \\delta\\varphi_n$, where $\\delta\\varphi_n$ is the random disorder configuration with zero mean, $\\sum_n\\delta\\varphi_n=0$, and disorder strength $\\delta^2 = \\sum_n\\delta\\varphi_n^2 / N$. \nNote that disorder in the Josephson energies will qualitatively lead to the same outcome, since both sources will lead to disorder in the hopping amplitudes of Eq.~\\eqref{eq:model}. \nTo quantify the stability of the plateaus in Fig.~\\ref{Fig1}b to disorder, we analyze the fluctuations $\\delta G_{\\nu}$ of the first two plateaus with respect to their fractionally quantized values $G_0 / \\nu$. \nIf these fluctuations are close to zero, it resembles a perfectly quantized plateau for the whole width in $\\varphi_{\\text{off}}$ of the disorder-free case. \nNote that even in the clean case, fluctuations are not exactly zero due to finite voltage biases and finite averaging times. 
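One simple way to generate disorder configurations with exactly these constraints (our construction; the Gaussian draw is a choice, since the text only fixes the zero mean and the strength $\delta$) is the following:
\begin{verbatim}
import numpy as np

def flux_disorder(N, delta, seed=0):
    """Offsets delta_phi_n with sum_n delta_phi_n = 0 and
    delta^2 = sum_n delta_phi_n^2 / N, as defined in the text."""
    d = np.random.default_rng(seed).normal(size=N)
    d -= d.mean()                          # zero mean
    d *= delta / np.sqrt(np.mean(d**2))    # fix the disorder strength
    return d

dphi = flux_disorder(N=18, delta=0.05)
print(dphi.sum(), np.sqrt(np.mean(dphi**2)))   # ~0 and 0.05
# the disordered loop fluxes are then phi_n(t) = phi_x_dot*t + n*phi_off + dphi[n]
\end{verbatim}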
\nIn the integer case ($\\nu=1$ plateau), there is only a single ground state that has a finite gap $E_{\\text{inter}}$ to the other excited states. \nThis gap is reduced with an increasing disorder strength $\\delta$, which leads to a larger probability for transitions to excited states for finite voltages.\nHowever, by applying smaller voltages, we can reduce these transitions and, hence, obtain a better quantization of the integer plateau even with disorder, see Fig.~\\ref{Fig3}c. \nTherefore, an increased robustness of the integer plateau to disorder is achieved by simply lowering the applied voltages. \n\n\nFor the fractional plateaus $(\\nu \\geq 2)$, we observe a fundamental difference to the integer case since there are multiple states in the ground-state band with finite interband gaps $E_{\\text{inter}}$.\nTo achieve the required higher periodicity in the respective state evolution, we need to additionally fulfill the condition $E_{\\text{intra}} \\ll 2eV$ in order to generate a fractionally quantized plateau in the transconductance. \nDisorder along the chain will also lead to increased intraband gaps $E_{\\text{intra}}$ and, in particular, a finite intraband gap even for $\\varphi_{\\text{off}} = 2\\pi\\nu / N$.\nAs shown in Fig.~\\ref{Fig3}d, the resulting fractional transconductance is robust against small disorder for intermediate voltages which fulfill $E_{\\text{intra}} \\ll 2eV \\ll E_{\\text{inter}}$. \nFor small voltages, the transconductance is sensitive to these increased intraband gaps, even for small disorder, leading to increased fluctuations around the fractional plateaus. \nFor larger voltages, we obtain increased transitions to excited states, as the energy gap $E_{\\text{inter}}$ is effectively reduced by disorder, which increases fluctuations of the transconductance around the fractional plateaus already for small disorder. \nTo conclude, the precise choice of the applied voltage is crucial to achieve optimal robustness against disorder.\nFor the integer plateau, a lower voltage is beneficial, while an intermediate voltage with respect to the inter- and intraband gap represents the optimum to observe well-pronounced transconductance plateaus at fractions of the conductance quantum.\n\n\\section*{Discussion}\nIn summary, we have shown that quantized transconductance plateaus at fractions of the conductance quantum can be observed in a chain of Josephson junctions. \nThis transport property arises due to the interplay between nontrivial topology in the space of fluxes threading the superconducting loops and an increased periodicity of the ground-state evolution.\nThe fractional transconductance appears in a step-like pattern as a function of the offset flux along the chain and constant plateaus are visible due to non-adiabatic Landau-Zener transitions in the system. \nCrucially, the effect is stable against small disorder due to the diabatic transitions, motivating the experimental exploration of our model in realistic devices. \n\nThe model presented in this work was discussed in detail in the limit when the charging energy is much smaller than the Josephson energy.\nFor larger charging energies, increased intraband gaps will appear since the single Cooper pair states of the lowest band will hybridize with states that have more than one Cooper pair on the superconducting islands. \nFor small intraband gaps, this effect is comparable to the case of weak disorder. 
\nFurthermore, also dissipation can play a crucial factor in Josephson junction arrays.\nInterestingly, interband dissipation toward the lowest band could be actually helpful to suppress transitions to excited states. \nIntraband dissipation in the fractional cases, however, would be harmful for the fractional quantization since it ultimately destroys the increased periodicity of the state evolution of the lowest band. \nNevertheless, our study pinpoints realistic conditions where fractional transconductance can be observed. Our result paves the way for future experimental realizations of stable fractional transport in superconducting Josephson junction arrays. \n\n\n\\section*{Methods}\n\\subsection*{Derivation of the low-energy Hamiltonian}\nWe follow the Lagrangian approach \\cite{devoret1995quantum,Vool2017} to derive an effective Hamiltonian for the superconducting chain depicted in Fig.~\\ref{Fig1}a.\nThe Lagrangian is given by\n\\begin{align}\n\t\\mathcal{L} &= \\frac{\\Phi_0^2}{2} (C_1+C_2 ) \\left(\\sum_{n=1}^{N-2} \n\t\\left(\\dot{\\hat{\\phi}}_{n+1} - \\dot{\\hat{\\phi}}_n \\right)^2 \n\t+ \\dot{\\hat{\\phi}}_1^2 + \\dot{\\hat{\\phi}}_{N-1}^2 \\right) \n\t - U_J\n\t\\nonumber\\\\\n\t&=\n\t\\frac{\\Phi_0^2}{2} \\bm{\\dot{\\hat{\\phi}}}^T \\bm{C} \\bm{\\dot{\\hat{\\phi}}} -U_J ,\n\t\\label{eq:lagrangianchain}\n\\end{align}\nwith $\\bm{\\hat{\\phi}}=(\\hat{\\phi}_1,...,\\hat{\\phi}_{N-1})$ and $\\hat{\\phi}_{n}$ being the node phases of the circuit.\nFurthermore, the total Josephson potential $U_{J}$ of the chain takes the form\n\\begin{align}\n\t-U_J &= \n\tE_{J_2} \\cos(\\hat{\\phi}_1) + E_{J_1}\\cos(\\hat{\\phi}_1-\\varphi_1(t)) \\nonumber\\\\\n\t\t&+ E_{J_2} \\sum_{n=1}^{N-2} \\cos(\\hat{\\phi}_{n+1}-\\hat{\\phi}_n) \\nonumber\\\\\n\t&+ E_{J_1} \\sum_{n=1}^{N-2}\\cos(\\hat{\\phi}_{n+1}-\\hat{\\phi}_n-\\varphi_{n+1}(t) )\\nonumber\\\\\n\t&+ E_{J_2} \\cos( -\\hat{\\phi}_{N-1}-\\varphi_{y}(t)) \\nonumber\\\\\n\t&+ E_{J_1}\\cos(-\\hat{\\phi}_{N-1}-\\varphi_{N}(t)-\\varphi_{y}(t)) ,\n\\end{align}\nwith $\\varphi_y(t) = 2e V_y t / \\hbar$ according to the ac Josephson effect and $\\varphi_n(t)=\\dot{\\varphi}_xt+n\\varphi_{\\text{off}}$ being the flux through the n-th loop. \nFurthermore, $E_{J_1,J_2}$ are the Josephson energies of the two Josephson junctions in a single superconducting loop.\nConsequently, the total Hamiltonian can be summarized as\n\\begin{align}\n\t\\mathcal{H}=\\frac{E_C}{2} (\\bm{\\hat{n}}-\\bm{n}_g )^T \\bm{c}^{-1} (\\bm{\\hat{n}}-\\bm{n}_g )+U_J ,\n\t\\label{eq:hamiltchain}\n\\end{align}\nwhere $\\bm{c}^{-1}=C\\bm{C}^{-1}$ is the dimensionless inverse capacitance matrix, assuming that the capacitances of all junctions are equal, $C\\equiv C_{1} = C_{2}$, such that $E_{C} = (2e)^2 / (2C)$. \nHere, $\\bm{\\hat{n}}^T=(\\hat{n}_1,...,\\hat{n}_{N-1})$ are the node charges and $\\bm{n}_g$ the respective offset gate charges that can be controlled by the applied gate voltages $\\bm{V}_{g}= 2e \\bm{n}_g/\\bm{C}_g$ on the respective nodes of the circuit with the gate capacitances $\\bm{C}_g$.\nNote that the inverse capacitance matrix $\\bm{c}^{-1}=C\\bm{C}^{-1}$ characterizes the interaction between the Cooper pairs with the respective interaction strength $\\sim E_C$. \nIn our model, the inverse capacitance matrix consists of the elements\n\\begin{align} \n\t[\\bm{c}^{-1}]_{mn}\n\t=\n\t\\begin{cases}\n\t\t\\frac{n(N-m)}{N}, & m\\geq n\\\\\n\t\t\\frac{m(N-n)}{N} , & m
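The second branch of the cases above is cut off in this copy; by the symmetry of $\bm{c}^{-1}$ it presumably reads $m(N-n)/N$ for $m<n$, i.e., $[\bm{c}^{-1}]_{mn}=\min\{m,n\}\,(N-\max\{m,n\})/N$. The following short check (ours; overall prefactors depend on the convention chosen for $E_C$ and are not tracked) verifies that this closed form is the inverse of the tridiagonal dimensionless capacitance matrix obtained from Eq.~\eqref{eq:lagrangianchain} for equal junction capacitances:
\begin{verbatim}
import numpy as np

N = 12
# dimensionless capacitance matrix of the N-1 island nodes for C_1 = C_2:
# 2 on the diagonal, -1 on the first off-diagonals (open chain)
T = 2 * np.eye(N - 1) - np.eye(N - 1, k=1) - np.eye(N - 1, k=-1)

# closed form quoted in the text, with the truncated m < n branch assumed
# to be m(N-n)/N:  [c^{-1}]_{mn} = min(m,n) * (N - max(m,n)) / N
m, n = np.indices((N - 1, N - 1)) + 1
c_inv = np.minimum(m, n) * (N - np.maximum(m, n)) / N

print(np.allclose(c_inv @ T, np.eye(N - 1)))   # True for this normalization
print(np.round(c_inv[N // 2 - 1], 3))          # falls off linearly with distance
\end{verbatim}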

z%E+|MG^`sv>ocWWa*5Mq)Fk^w>pV&ZHl}jJ&>q?ER+;aTPqSHCl>?2(l~L>u715=< z$^NVc9_JM=?~y-!CeJ{l`qQy4XA<@oW@2un%BMNpRYCj*CbMtlWPxLX<>Pi41Y;*Y z-F~>Pk5Rg2OaZv`PIv>kKK}rvb5mKwOG|fnta-ps%#7p;Myt%{8haf%K%PuU#{`uO z#QLZ?3;2L*m%r6)yw4&NHQO;P!O6)!`%!?c?NsQt$RwRZW99|R4c>zQ4n4Y8nO}H? z5XlXL0l%up!1{7Bewq9$R8gfR<%O|llE?`O92s|o`s66ju+P*}qt>jMx2($;AHo$> zf8&QHyx#9om6?miMst#L{J5;2v|LC^#G~=;`SDs40rA%GZArX(MlkTc%3?i%VlaIx4ybPfuYe>J)G!i67R^%favb*U#~ZKfzF zdCHa%iDdZjv@{=B723;>TqhLcY5h22<@V@=Ln<{z;xjkllYK7 zl}mA|SZT4l-^ea*CSivyfWFF!&%PHq>UpBYT$)R}^n|0iMJfvxBZ2A%3Nn3p_pd$G zw25yn{?IMvnfL{tpdEro!=JMJJeSX`Ku zCFB+XTjycZF6Y$t=zHd_C`^Vg1?xJZEHkuPWB>#E*%ckQ@P zZc^kwBu1H5&-a~&BlNCs!~PA_wN_nCe5{Z8XDBYb{{VRWq<_FbuUE0s^^0iHW}XO@ z1{EXRK*zM3jwwly&Xr&`xQ|bQF%TT85Wk=T00%+%zNF2-A(=uixgh>Lky-Z1dkT|w zu|9K$gluoeJ%10%u8A$!d2UH9{4DlZ5BN~D)DuYj%kdY5^}h^V zNj;DTY;1AIr>*6KE2|1fo)qkD8k(H0k3r!mG_G9*7 z^SI>TS6!y~H(t1uq!w<{FW(sDeb1-XzVEy6{*$I^R>t2;ReP5G=|S?Ui8wjN2I58+ zzd^v~v0n~Z#SAjpqQ^GT9^eK9Wak891p4RJw@odOxffP^J>g%6y2N_aU)oWa7GaQf zhX)4-Bax3^#=YxL)2=k^MNYjiawPCqDT62U_--JT+xUh{{Z3@sEb#9i-mK$c6yroKIx5{IZy(~IrsOkj{YIU?XAir za@$YJ3E&TIywDdbWSQUpW;NEQG@K)b)B zG-8mC|JV4qW4${R!N4^7LmVmMn6)6Fq2sMjA6(P`)tIOeZ9QoT%NlUtRBKENF%QzF z9e$isBj8l1rl}N-y)6Selk1cCR{U6z#!plK0N1RIJvK(|=rQ@$rMbb~=~@(zRnZVC zBnT8|mLQDr(~h;*ygp)ZF@gN+&GdW73diP#FZ?W|ag*prHR#$*-f1zhIVX^NigMK& z#h0|`3n>H=RPqH@@kW_ttLg6@<+w8iAqeB1+5Z3{yEW6zykg;a2ZpR0iDSM?n|Ty( ziF3ER0y}*;&2ct#txuPAzYA*`MY+^$L7L1SUOcj{c^ijSC-wELs~;I$*}(R9uqDbm z;CZ{Ux$X*)oOa!j?OxHRYOSSe^Tlzs?dQ*&jy_gBcK|yc`bR#*>YH69cKeBl2n)R|P8)YMqcF3yB`ePXNtf$we-o8Qw+tWQg z_{r-{)wCO}E^?7S+9B#30;i`SspJvUxIK+w5c&CqUCbM1+3_99Fbf)>AKe`@*RSba zc8_A7Ak!|cJjHgjQ7Kg*5tGhGq3&_lt~%EX5{V))$&kL>AIrU6(&C!J*<rF;Jo3D* zf0ao$G`#qV@5VlzsaAO0VgC1GJBp(X&?E=KXV;}VOKwR+_q%^u2Xf^0t19nay)tL8)MKqML?o9!oYjax5^elHm-x_%LfpX7+xd!ilGtO@ z@E-M-5t3#b_zTb<(zb4J=)jUyzz6dFgjEI`IAim-KSA97RKc6E-pL)PwpJlqfIDRM zIQ8vTF7)lT&^$kN8c|^l@B5%HW3i#t9>)I#(~Jm$05h*|9g0 z4sxeHo}S>>c?IISr`i!_LCDYEU!knqHiGj$#PILLZwue)=`OKyAi0ch21FodryYKr z*1nzM_3<^`#+_)p?NL>g$6uSK??Mkj*1mP{#g><=;?y0cL%`0{oMRoq&wSUt{6Es$ z!#39nBMZIq+pu5IWP?(r&{L-rn@6E(@y!$%xELdVDoOLOu<1{Eo_7a;r>+k*AuG3O z>06vVZecJb#yJPQOBI}q!JCZpS7KPh5aXvxy005>jnlOxi*kJknkhW7948!OH5Aa4 z18e1v88vaF3OOScnRjsNKXw5JrBM;Jq2}sBGl)$fst^p&=b!+$R5??Z)+GWJ=&8ivA}Lf@87?zD65^ZS{RqE zm!+;XIn1ud?yi4QYnQyfw606hn7UT1UQ`WHB4Lkj0v#3$r@=ve> z{{YonAZBH=%Xc6IvgGg+Kc-GUO4owc(=KI4eS;gErqTHN{uN3~>@mrr=(+2u+`Do0+6g1_ ztt)+NRla7GWQA35tjH0I5_lXgMseRf3a%ryapl7tQj_ z0yKz9nTqc@BciX}fYJf>f|O)51lgs*ZZR+bL-ExE3$)JNur5nmPUdxjls&G1m~bp zpP?d#fcGfxT4z%{{X&4cd%=kWGnVNCpUK;!Y?`h0BG@##MW&P(a}Wq$f1a3l1v;hF30dW z#Y25^@TUI&E#Ge=a^Pq3teeeJ^UB_gBO@T9k@}E-mTR9)Lgscpbo&zoZdOd3f4{u{ z04hRJ>du`x-MP>!E3@~)CQhA0cEjANq?e@gRTwb_xdwnL1W12#|MsEd!L zYD-;S2pbmi4Wo=mS)7sRnEs!Q0O51gW1U$+&e2VhV3D)Ca0Y&#TH}TM=oVX$ptsGL z8w7g=1+n!wtGa76_`u`k{{RzHuvTS2I9}fL3i?>XGO>|jx#Tp205}AH2+v;B z$?TruW?9$wY#Tn9s4oS$|oln6(y;5I6X&7wYC!&n{<-pvcMir zImb-!J?bqBZK-I?hBi0@a$^V7`=4CbNpS+lmYKm~4iIFI%OD@hs$1CI3|xlt#y;yP zJP*f@r!IHIEpmz+={&a8G(+Z14|C23NSZS&*ZQf-zq(d_(X)p#{s{qi=(f4{`P8zN*eg z9E!l!^rY17nk?*b%D18StBko_(WGV2{Q9|uNaSR2RF6?vat!lc&*Q%gTYNY5zLqKOW zLR9ywZar$b&w7w66{yIoW}+#RQUpPJ;+WMta4}UJ`ceb`(D=A?r;2U}r5!0UhXGP{ zrV4m840?=Jn1@QPD%?~7E1UyLooG14JqI+zSYDW`kqnXbs;V&XLVlQ(U{_oBCb5>e3H1w7^#7qjZoOA15#i!f9+2fRd ztW%XEzt+6MAp;z3f4lV0^7r?zRns+lyPFT}^Om`T z_i?sxcChV^e()T31df&Fekju|F6`p*29}}~k^H&g9TC@)k75WT+uRyxa$Ql#UfjqT zmf;!Dj1QaUP(2CGYn^W_zS4m6_m4n5`g2yk*Cb5QNOBaB&mfO#ogs$yFkp}?_f!5g zXFJrzl^njv<0Owkj30kWNN(;eW)elUnlsa?F6tWRBWVb}+j(7LnG+x|=RcvT;*vyNjB=x=wODwJL5Bo(IsB?WFMbq^ 
zWaR!7g2i@sf1em&eLDSXMiage3FUK-!l+yk8Q8qBCN#K?2bGup1LiwwR#@aDdQzR*|7J5fdlTvwUhSO_=9K4%2}RFtk;qLINx zAD1W|)m(2g;C<3N`*Zy(TJ9qr<^!i9vf*a)0dL{TkEL0IRxT`FY+KU!qVu{Q4kGc;)Jx{ryHa!>O|FhzTwfpsZgvQGYRU-fe2WT@yH+*ixK3-JZEoj;LxzsQq=fD3(a zdj9}A`eVblnqAxiTRDR{C%z6xw``7UJ8EY=k3)iN;j#ME&oeV^0~zUAw#|XJKJ{=B z$Mda`#MYyc3(86SYS6a;heLutT4WjC58;nW(DL_lnxu(xDajs3%yMfY?$iP1H#n|{ zs-}6YtI2|?EsT5gq|o6feGejpK8Xj@MKoSC%eIv@=J1Y?4zIa_|Y3J&hCeqE}5(9K^R&%NBLJj zC_hZ&^Q~ySA0@i0PKeJZ-3p|RI*?f4gcyQpjMz|%pd+KZW2f=Jk@?p&SZ`;tDD=2y3pyF8NZV$3)}^6}}8ex#bT-%x15 z{@9iyv&d;=z{up0`QnyMYURLLAXtbT013y`6IvL*l}SOjBbQ(oIrQTr@T}RbAp>;a zNLcf<5Zrd$$NvCjHAIj;xdM%|7nO*{5CQ#hR4z5Ulk(bIMHuPxg3r(XIzK9O&;~AI zgvYq&~(%&V2@W{$EP#$A=p7U$knsD=Mx6BzbingN>{C z5Iyl-ewC)_nrwhcBv$OWELRfCk4%q}Fn+oE(zxs|hV%)ciaA;}j^<9DX!B$P{g~Hk zgYL&4TJ*gtc9F%ba>fcN1pyx}I_}x>r_fe9>Y8<&V#>nRrj3r~EApw&c4Nuswnb)N z!K-QnCe>JzEwOTu1s=c+$LpHS>$yi+eW+Q+KFw*i{@`^HWJWt+gan`SlTyQ@TwcUy zyo|!?03R>}dhxph^{z`r4ZI+pC}7A|XygNP9B@c1Py3+$73gcG8^e)yRYBlqI{@sc z4_{V1)V%`cQG(_@$(BfzACxdcN3K*3{`E-dX%IIvP9YL!DBD0>cV=Q+pTp@|H`ls0 zn=g?Ba^25?!Zvg6%ug8iIQFb~FQ?V!Cdfl>BfxAn_8z?kGxYrF4B69k+{l+#HwyO3 zGO84f-ke}$eR!_o*JR8#sYT-i@K4c4LG?V>DGk9fl(dm#V18*BmdW}kBR`F7*1>g5nek9gxlHAGv03Pe*=c_+q>yPP3 zw2a%2vBPnYhtFe;xC+|NSzt5#5-0)ix_p=h9eFjfqj!CMJ-Wi-q zSLI*22;>i^Bv5P6>FxK+k1&PLYTCDrSLFZ-=AgK4q~LyE%C(@HPrZ^C>4E+<(a$8! z_1HNl^QWFU#%m@Uh~7n9GqL0yq=EJ9D@?K5#z`GbS%X#>-eKf6J%vO@4(2#DDI^Yf zrZTbQngI(Gs!r2OC=4^#(xT`yO0o^xT7lcDIS&)~ZVh(LqLoJc5(wj(`3J>*8@bYM zq)AYWcOt&;C|f5v1lJkje+yabwvMvK3jy+x)K-w6D=jRIjC9lxL+?DtcCI zWqeZ)%l`n@shnb~lbS$6=e0mZOnRKuCZ&m=|Izro8g(Yo+Ln_jaHoJNf}A?gGGSKZ z992Qut}or72Y0EXs6neSjwzrgO3}3$WADf3RFDePvtB{>&1i;dsmz%4z^=nZu%6>B z7F2EE-bQX~#~L z=pGtImf<2fB=L-PJXPUsWvBv@BDUel924$qU&GpUly}F=l&&%`eR^PVQzxOSJ*L7K zwLc5JvE^ABKjtg*_s1!BpzAjf%bkKxoBiR?{{X<0{)5uLK>QufnDCYkGH^hznEp0+ zS+0coj4|b*Z!TO9;rqz{0ETdDlV((Px;$@B)PB=$w-C4Qq8KA2WMcsEdWz0nKP z(8(J$jr$2aw&!DYCxP`Gb~VRnmlNAuJ);~peo}osE2@%U>Eukr0rzv;J*vu5x;bq^ z(Upw$_-uU2M>!t<0EK7Tv@3^XBb=2}f&93xqwOtV<9gsB$ZTYN53#8)tTem$e%+=2 z0HZr+b-6o+M^hVh$FUrGnr3ZwIGd}Bzbu83f+I5p+;M}Cm+4sO_YIs#!{*2U_w@X0 zrMk6v(5$O~dscn5sE9j{nV*25bQFkdQ(I~qL-Mch69kW5yi`_-(ZrJ$&e<3bLZIY- zI%n@K;AB8U3?6%aby_!*>~laA3^;Tnp!F0MK4}%Cbw(=a+n4e5{CKM@>w60ZU*3U} z?md6TvaXfg2`pjCT4O=qADi4Bwe&>RFqp|B01V@| z73cTbO3KXLMh{V({U}VUUqixgtd(QwmLTWY*AUT|zjv>&(#8L5JOoqX0F!B0|X7n$h34-i05%0b_| zkD$o^02=j(tt{-L)U_$$1c0iJt%5fdBzHM}ne@$hCxu}1uOeBSWJ{KE2q%zE0Pb;* z&b$40P>fiJhsr)*mn4&%h0mt~ms6@g35ji}Nu=s!cN{U@!6Tu-Z^sxu)ze>T=I>^! 
zXjPMrWEJ-5nT?OdwF6bl}Xx2By}B*4JNvjpGI`r zYC^H!^b>s|?^+}Ub#h;A2Sa#XJb zA7lM1-8>&|*3pPwQ#1tRGV~tBda3oSrMXhr^b0F1h=$8si5qV_Sm*L4w(Y#&-@I+a z^!KZF7E2wzP-T?oIqUxb)~l^ie-l#Cyk&M_M{FSh?tnmt z91b}d!mm<(oS$muHJw(;$rG_ej1T~DxW^pe9ltjsCr+I*WBy5spGeKm`6HCq2(v$BkmSaLp40BD@eIV1qT{eR_FVYF{4rHt$(<1BJX_Rm_O z3k8%9D1tSKNazZ)6Q6PXvs<>_7bi1K7-;dGv5lnXwmSZGmXf8Uivwb5Wgq}H@_P;x zl51|#_eqA~O~UMvPu!0(C*-&p9+|}s;w+6h&e-FzhS+U6X&IZ3U8~di=Bv+ZX=850 zxCq!J?bxmQ?jz7}4QolPNi&G_gk@vg1WX>8Y&Qe&CaJBZ{oAznEYq+ZW81VJ_Lz(h zrzh!Cah!eLp?@(Fd7eZm(VO^Gb=tVcjaTJd510Y%*nX$lsR^A6_`*9YO}0n(fMMEI z-L!TJI2j|pO=Mf^7kc>nEFwu6k1Mocy@*xG9=YS*u-8vnNLz#CKmfpQS)14nN%{`8 z*l3?%NuJup8xbxgnXsg{_Y@AE=Rb%XQxV*Fe?gfxOLIC206S190Y~>y2L}hKKPc;h zYprLo!M2j@$SjK(>Nc_we2+M zEv>e#vc(Zx1bne?sL%7wC9bbw2{37Ky~`*76**k}Moxb-TUJ2IU7QA(H$U7$e}pzN zNbm1ZOOcTFO0l%}GqDTEMO})c(*yecRbD8hxhkl9i*h#=#@2sBgV1}newEsre=r1M z6&IlIpXvPRlSKqcxs!f6E;Ez#pq_J|S!cN--Vx=-4&3DK`uG0;K9#o|5nCWgz(L&l znwB+a*L0(6xA6K^d8JrlF2{GPW3RG~=8&7d#8OB@0*++%1CP$KF7DfRl^G1g^UwL| zR$l5yOv*$`e}HuZ-1_}1B1g8jW}a05{{RB7EXUO1g3>6@Yyz%EUv^8&^I-abe@eA( zixAsxe8*$+6OY%mVM`U=-OrqoG9fq$Pt1Mz;16!5x+^GBAQhak_viUjDVmn?^Ot?Z zdLAow+9M+41fNdTLf$wB&drSa;AXl@U;>3ffr=6^oPs!67-mM#qJfIhCkO}x=9rPG zJRIhvdsH)mtGKBdU8)oL)rK5Y3@TV3|I+xl8gQfKpb82k6(Ay`6!1BuGG(hnrBzzA4?#c< z0<9>j%hXk+sz8fT*1f3Or1}r@{Ocw@SS@ZszaakrX#W89>d{5pcJkOZ0mrXO^(`X) zD6OMeo-wp!Z2(}Ny$?T?d8}4*ynbRKVb4!$o)wU7+s#o(`X5cdhCv#_AUVPIIO)c4eQ{0joZ)oHUvWPob@yXkHoagq z-w!mgV91}mO!}|?0IIm<)U~9peti5u@O$`kS`*l}nQ1BCZ!kY3sCpG{p2T|OR&}_K zQn%m0b>RJd5BdK9>-uMnMdp=mZDen>=oeEovBQE*q^BFe*Mf8HlKPf zboUcR{{Vh7)Q_irD$#9fI(3@d`8O%OIuB9wKjc<-l_Vjhf$% zz>mhYEm2}~8}hQ`40_{=#fJoL^f=94NrEJiNV!mVDeeU!re9%{P{;0Y2034zdk6$!hdDpWuo(t+ zkClfc9^=}lj^bGQjlYj2P@afUY#t-A`@BFK% z5p8{-5%_2FteLrDXj)jp$C99QKEF;YIO#IT%^_To)4o5?(z`7_C>REljydg))zI6E zWC+pi;OCxy&sxp96LZ0JUk)^To!4%9X1qJadOwkEE2zNjUtsvLt=84L4!IzA{v*#6G@ zSTcYM4ulRl_Z(M0rd<(q@VLjGjT>j7-Sa8@4K0(H;F>@NgYg3O*wrK`89EBdI{Pi{EFXvuN_qWVlk45cU zH?mKCa&B%uWgm17KA83&{d&l}SsZS2wFgiBxA@XJ;~OI;^6ox?K*vlI$Mmd=r&Lcc z$AWtE(EeS1wQ}b2D^vTW4F3S$KQSLp*{(xTyPiXrJPw2WYSJi6tI;x*DgnHq>%BnxyR#yl7C9pk6gHnq-l-nxNj|T06gFlz*Rd$o~%^FR`^zr%l5yRVf zP`_LZlU*45T(Y29PTY>cGJS>*>rmSZ80ljLnwT0N?ExME5yp|mN&fKQWApyCsXRCF zBFJs+;o6{QK4#4S017IQIPb`;i>*fSCGu?CU=mrn{%0FTKEItd2$6@{Z^{zGa9rhx zJ@$-+{Ad-7%|^-Y9JS5DvavZM<;ni&Md_c$x%sSPi+1uBI7a4F+6I5yVbdO+YfDq{ z04&hjT4EUZR8zNu{o=g=^v^ZPY8s-Ii0T_wIL>3Wi=$(?z!(Gkn$cWk0}9B`sNGCq zflk=)yRbjm&sO*U0PEINo+GighwS!t!Bk`};#shJvEXF-ax0wH_1nw#Adf9`ghlA1 z*ne8Ujwu%)uJ=7lo&f$|k3Q9AZT34W9a*NkM?ZAumEf`N0X+w)^2lx-9a1#QW0KhbkLaL(_3Np#nQd)a@tN}28-c`+=&Ak1{{U;L z?M;F`S}zmJWD?5EBx})$0a$wOBY~dF&IjdQnWXrIZY_i4BuFyzG5K@owct8Vl5W&$ zT9RQ{{^HI&0NCo%H#kB0vFTplWi7UoVH}#4-NTfIHOJ5ZdBFTD86;8KTHi@_cPwoj zU*CU}k@V^_?~2k%17%_y4yUlLQp$U)%u8~H6p&?8fB@~E>ss-)%%E+tuij#EdGsB> zon~a>OD*$BFt@ddr9Z^|0Q!?e>cxVRFr$)teJGbwwt^zXf}&%$uTCp2O-?!D^Azn; zalL@a_Eq)z^q>-CH)zsfljUB=pySY2GV)F`wr6%ap1D5f{NlQ#b9wu;{{UCq<3EN^ zdb0LVqG2|xeq2y#dmJ-dU0TMjV&5h`Fh2r%)KO~yIT8br)Mxw+YU?cup>?>9~fan?!V^4AX>0IYKaEHy3d^|h>sxae zwr_dgg;Sb$IT<+m)oCy7)BqbN+NjBY83$;|q}mn5m^=RRwzREWvkObM$>@xKk*;W7 zvZ-D$YLS$)XQfXyozRa_kZ?yz&y-@5O}RIADI@ah^0(Af(hpkb-CPO{Q82)(5(Azp zGCKQIfmwm7?gd+5dR1oNP{9Aw_`8u$6rE}7NTJm6NCs)ZP&P`vYRJV`UbQk%1n4=d zNGh^bl&e6CaH%i)S3?la2dKyA{{YskD_~0s+J(p<`{u0_j-m+gs<_7){OZ(l2oCfB zH$lKZp{xjR*hmiF!mo>{-x)ZqE{Zp!nmHUav93>C4Y>TcuA1HBNYT}g1OlU;z3YpH zSkK*ly|Z1mji^m)8~Gs){h)#~!1wm4W|2OO@Xh2&W>I)Vgb+X7Ij)1l`q!0U&@dz< z?GI7IG2EHs$|W zp~mM8@s`r-OuibPm|Mx2i?Z@lall~9@4)x^;=XFV)J#Gfm_hO-&mGa%hFtr8ppNzR zZmp==>NbidezjGw$xLBV0{d;UJP^Dl@rE2~K+TS2-RNR)yw9|VBQp8Sw;_|--9 
z3XyiYCe!sBJJwMQrW>E(1Yn<`AE)@&N&U4nEU@Y};zv%7P6+hD{{SMqpxTuo6fl24 z4&^^iwds0Bt7WO$CY|C4Uvyw%c)!+T9eMdk^#_sb>7kTlQPvV&LL)7#yTXywkC#10 z!IQ!0d9DuP&L+$;xgYH?192YRNA<1!UuK^F0OrPP8$TtieV3F8+*0y4f5Ez!+a@ixVx4*4dn5rO9s-XMw-&`726^Nsd z!4jy(0XX!>zx1my%A16e45-2Ut2SGO80rG{Bs3)s;)sCpImcDXy`38^p)_uXSOTVygA{8K!t}=IIknKp5YrP zcOOGukD=@Oq--J(s;KH}Wpk>VN7XtGkrG&`0Y^W}wOYEf!xUCjY;`>|?_76=b@^tA zL$=KGjym_?n%cV5l(0F#4WFfR+1%XLnpL%cI}CA#2eV?bKeu!8hT2DeN~Nmm5=mkT zw&8-^a(MQyA-mP3ea@dJBy-xDjlk;sQQ}l+SeY@mj=WcdYdZC_huZipp8R{%n%1%v zT0E1Uc8-MCl)k zy5ksJ9!5vyUW4JQc=Ber%J&`ij^qxXO2-k}M+9@Q-y?E~>ODyPYoyUEp6*piQJ53k zjAV|rvMMpu>Z7##OlIo@BnZwCvxN+Hr$65~EJvm*jMWs#cM?lr0#9BuUa55@ygEPF z;0)48;iP8A0YShAo=@@|SD4-n(lRzd$MmB5o7+Q^d0HufQ^*QO)N@T5*6 z^E;<#19@YTdY)?#U0w%a89bbm?mg?#FLcFV5d%u&s8PVpO{e%#rW@{L@+14l2Wb3G z4^K*BJo8U8Nhg;Ak-LVJuRn*NAD^l1UA3;8VFZZxW=F;kKRcX%buqPb+d2NV(;ICz z!30rxY|2XJQVVCX+kgS|89$#&lUhy#PKWHs2UFMFlfWm?cK&6?jnvhmmPE3*hFE%K zREBPQGj{jJ2dS-HLs!uti0_%^DmHA5fWgNgoM3yL{VU8(=-VZ>mXfl$A&QV0+ugqL z{RLCA)0#&S-ANiV{n*22X!T+<`3^m3vID(`S8JL51~IOKkZ?OtXqvqLYr%QSm8%F7zLzb z=vj{g@cb&00=>gCHt9eBWchgwf_i7>BlN6!s;c03%w`1--r(dX6K4FPb=WfCo*?PaoscrBb-OlHhq(vF{x=fyd?kHCV>P z1-NYFpV1-m(Bdleuf%iw{S1zuv zVHom+?t1{(1MAYJlJV6RRf(ifK2d_852>tq^-K4-5LwtV&fMHxM+Z&p1u>oPhv6~l zid%yrE%;WHi=OpuVpwBXlkT9vs2F~A+Le`nqQfSjVa-}$P!Y{b1VTpwtH?4rt8LP& zKSBinfB)3@z*K^nQVK;6T5zNk;Xo2ETAg}S3szKgrZRLKRfXhK3sz$lND-xKykrW} zML0ZFI2VChai_l(XrPT(cX3zZA1T4;DhT(GPh3^Fm~9MHe8&{aC5ee(0Ir6{Pn{Rk zgIVx>n}uX$C(!4wv8`Kp;Eqtp$1BDN=|O1hbeq<-xKIuOV1Fai@~QO)xt8X6?MIpA z?JbYI0X!Uc!O!Pd#1XPZ>~mY%R-^r!s4duyWky1h1IT4PzP^XC$pAN5b4d`&PPv_ zY#R}%{`E7r86}PhBz7l}&*fU8eKL5&mo~2%Qa3Vy_{ltwag*uDsO65{=xx(wGX=>a zPnEgp@(!eZaYcx+hPpzksgQ-`l;GrWM zzyMEDamh8#%Vz3Kj8Dzb5Af|tgGF%LFi9B$in|TuAPxCr=qolv+A!__>?-tHHlXsu z7m&fO7kqoT+`gmQv*(TZZ>cK%4a;p-2;5|FeL8-1)miInouRgZeQVBV)9136%e+FE z?~IDwv1o7^HnH^`YRzEZQ|R3TS&c0ak^`_9`d3FLp%cTspO7(*xz9D=8g=20R70E& z+3jBQ;f)H@RgFB+m0`)ruMY9PpY~P(4*;C{kF7*xn<<_xsyT%j&IcTlGr;uCa0o6g z=JVrR?L23ouTz@TT`-E{l)(r80Ix!5;14bo3ckawZ*!V(yEwBpt91djk|^0b9&!(; z$3s^w{5yR+4eacM{oI^n4^EZn+IEP?03heRcMxb2>|M!>eiaf?d!nODx#o607f8a* zFbw2z+=}UJG~;U~;JrCMng0MitI#0O_O3D8tq-T9c?vpo%|2sg6ml{olP(!R1A~q- zD+^H5{#7M#NbA>-cHq}-Bqk>3?)uf+T{-n6*np8A-t;*?(D$O_X!+k!h}+KR9PmFb zYT7u8FEI2(#clYu!|eK-LZd2MxB2?lFRIPu!Q`hb{8gN-48+zeOO zmJp_=q(^TyBme-U`;TAouaWdvJj=-AQN&84@g$R9Q20LjDH7%>@&*I$QTK2MIQ&0K z%4U@IJzGVBE1f>xBG{1tQ;;$U?ngoo;a!!oGDa2u0D%2hANmsg4RLyGsTIT+!v@`v z+v-1^ckxHJwln;xM8>A(gnN9jPbAdn&ch&ZDg?mm&{WFcecXXsm=Mp#b6Dm!cRPRm zRjV#s=e0$(QFGHYnM*@DNH*}N9`&CzYM|z{CXra}KuM_0vZ&uF_r*z}%1>x#lPnG! 
zfETI%00C5QtVAeMB*r+w;D3p(s!5R)w*#;z)Yb*SQg>vr&+zhd{VJ0gj-O|;aAaRP zLOxX-0DF9+`t$AHtLj%*acvT@5;r?Q`-9M9x#~}EdeF*2kcpX3Zl~(U)7W+I#bDgS zHO!=BmSf9y`jb|MWVOuFI9Py8bRj`J^*QUG&ZC+wLho@*{JmO6IODc6fIX{z3rH;@ zEX7>~U+r;?4AkmoE<8%F^^{7L%pS*vW&ME3>420Ss~N4a}&rMlg=^ejAEN#6S8$k^X0Dt@cNQYeLGXQ9P8TLvD>4-8+gMzU<2u%gB*9pYbxpk zjhC~v%*&75t7S(R0ONsHbuSXyODiNXtZI1qR0EEg9DkomH3Jwbs z>(2oFYtJn2%6{_WBmvZ5S3_rY4a|Y0EWqQ8^#1@K%9!qYeue#+Z!~jCBKus2BYSz% z6ZAO)@COy?s|;7DvT93(-IYdS0Y2ri&(Uklv~612$}nuLk|%tt5JpcQ#KSCpoL1ag z%H9!e5?iW|^|M@XtLUH}PJ4he^{m>|v#oo3d*pcZbn;GcZgaVEJ1O9w%N3bwE;Lz- zO)E*3;{+1t*sxU}fcl!w)O<^0VF#6OH`}LC&z6ED9?GLUeQS@o)LuJ;)OCL@Y1;uI z$z+Ur01qeEifJ()Kxx*?c@o~;-1&+|LI?fYkFW6`Qh4+gZtnIph^k1axWHkCKY$rE zGI>B&bect zTV9DRu5DBxA9>{5e<9QHtBjH;z_7g1>?KL8VR<+5%Mjee52pgU_4A5Nk09tOqX4_ko}&*%1CSR8g-{{TABZxQO6 zjIpta;k3pJ3@-woC|v2U<**j)ZzdtzhTPa0^u;BlR`J-Zp?rwcDryNrCdgfkDsw4&uxBj&D$v;;XpeiiPgSQaj)p zjNBpWd9J&XG3pwTq)vso>?$_KIOjD0LgUh)aoAOYbK02v3PV5t)%e6zgW987lo~~k z1B!U2ihVH;T<|K&^{68iWoQ{P)yRf%RaA3Y5HBOzsSO&?6d2F7YsMjA!RuJjCU_N~ zi2MrBv|sX&@D2g1No-gs$l|CXk&N+M8`MNff(RJqf{{E?$|hH308v4%zT^BWZrB2h z%syk#)_i2F3i-)V&(zkHq{U=a`evC()r5JG70*A)oJm;l$ESa4u?EI26l4yjtxs#Z zF(MC@hyMVsvywNAjV}-Qku-Q^TiaB;v{DVll%zB>XMpRUxghEY2R-YX@ur92>E)Ni z8eQ7Mr_1}+h435<9jD7!$4*y_o!n$#)_nI<+Jg*%WPoK}IXN3or*ZWkjc{80cM!Wp zX3`{Eepvj?-!ELNcLP0nAYf-0tU8x1u3^BELQP_5pp)%ZXqV7PWwn@LnND~Z>OHuoV64n1VQR%@LPr&^R8l8mJk%I!oIub&W4~m@1Rd z9P&W=)%4J%n1OIk2Ty7_jrk8BnCd=L`5(VKzG8#rTyz;3=~~(jo8kNWX;WF2NN(9q z#%@qIY=PU>y>Was1aT`{V0KaNeXAn(!LYd>yT;*?{m&oN^G7l#*)x)Y+rV;2Cf4mH zNs*L!cB3&eDH*{BxX)Z;-nDM^-7alGwHqxkBvHg5w6+K3i3jmJ_jc>Rz^lLThT20I zRO^ABI{r0oFAu{KZAkKX{42{?8fy{t0Bb;$zm8_#Yv{cWh0V*$WO??XX?Lm4WxJ76 z2Ze}W0rjf(z8i)aOr@CDrws*(`IignS5Ac6cwSF6>0FOww>j{sav z4WdOL$ydN`_&uwLu<+Bd4Y%o<^!*1&o+Tw?KPeeF&$s!_XD6YoRgY4J;pMh?fywQH zNF%=kJuAh0Wo9G}Wf%iIo|vzobQnCa3m`047|sXd)2(t^=ZE3Annv=*Gmd%B(y@-3 zf@=0YUyDTC+mvT@Zd&Lrgfj9EV_vM<0$Vcxo}^>aofVi;8Mx%?{Px2UCHLdeXQU5<;k?^~Y+=dvHf12dF3c z{{Z!?dT_@ejAQiw0PEF7g~{*FudM?)JF6#|wsb$!*18KjSoFJR&mfMYf z7US2c^{jnEebg-se<(#Kj{f{pA+zVd7u+nGgUz=Aax>~r^c^e9%;xIww^dFDxvx_3 z+yXMc!+=N~&upK@yt7S(fhq{ZYr7Njk? 
z`RPw)iZ#hNCZMvkVPoX;Rpx0W&cT7}{uGk9Ey3cH&YKD->BR#vuHt4sTIU^4b6Gb4 zTZSDt=qrBaMT#)z?=^Dw1@5Id^{Pc8wh2rQt=LlxN|`N_pF>tWlej3r1Fs^jrMB>7 zW0f2#uXFwb)~IF#!Q{)a`A0$j0M_g)8sY|WOW>2+{{XLDP0Ac$SD)qds>^8_p?OA6 zr}ZcLQ@9*DY5UyBP!t^rI8pvT%CB2NY}-qI^|OXK`9VBw9;E#%YV}&+ED)w-YypA7 zk7504GZn6!s-xzUI8_|uxnh0)0M8V_$Gvx)q%yv657#-y2jTv4Sk2|La5hGR0Y|U7 z{(t0F@<#JVxRMKJZaWT{tjn!Dq)ovKA;8B?sxou+Bvo7vQtIYOS>uR|V3%&0^dC;2 z%+_GvZl`(3Bkq&VeMvkD>19}K<7wmtS@_B52e9?+llhE9=S)^&_9BCbG_-bX<^+ z_>b}^B&tX!AbwRmSM34b2hG^9>GaJ8+Gxh+M*Qj@Eo9?s4&7>mbe3AJ#AsQjZU+jd z`Si!ux}7&!wzjruZX4z#u{<-eJpj%^k4)s(oWu-muypl2{*|X3f#*ce1N8Sk^v83d z)HT?{?^yPxazc^;Zrh3FvG@w&^&b$;Ci3zS0NckvzJu%B)$5NcRxA!dVo5#gmb^{0$4b-YD)cIyxvg&{}K3YP@lg(;)ytF1xg ziRY43g;$Iq1fRs`Kb9-bue3`|Q~f6rhhzS=*pJHUof06)TaRzTW*Y zaAS=}%B%dU?8~INd9DBu4gd{?By=3JGkxsA~5c4XjPH4g&fL zU)q#Ut4oYb~UW^~8q$8p}EEX;G7oP}p& zoYY{Vo^e?-F{-_fO1EzYdva-sBxfLz?N_bW%zu-$2@KfNpxepE;ag>bhdlj3s@5>t8tS~=qLmK*7(UqN(DqJRis%U6yZoI>(-bPAgc&Cs8n%QV_uXXCq*Tz zQUt;O06$uS7`fuLA&Z0ct3@I4M#ttID*6^3IH{trxG^k0II{^u=i*J5CK3DAF+EHUJO_;~fvP9&I1$o5#tsGk2p)v2sE5j)t#E1EghP{gN*`6;oc^&as^Jpoq z+Pq4erimGb2PbIkJD>i)ddf)A9yvCZwqH5GkrZz85RQc71ZRVsWFGacq8N1R*{rVM zYs)LVhl$jo2eYmT0o0$wWK~^O&P!QbV6qV1y#OQe;;U)*Q@VwMQmi>r8*e_G@t=141b(d%U~o@Pg>ma;dE&*cN5h5cjLBCN|q={7z>T3sjSV8T(;w%I?mqKXNlAlkw{WV7(Kh3eig)NvT_JNG2H^^sL(yj+;saCHGT~`KM|Sd6y*Z1fNca9eUPcNXtIxn1P(v}FT;$=r0uf9qN#LuUu$En00->Pbi+IoP=D z2SM+O^`;6TDU#ZZ??L&^d8QDT&l z$j%Jym+4S7qNw?JBy_H!w6|Wz`PCQGqy+W+jRjpC@U=w*o=BmQ!g>r~0s0EsYerwa zQ3#7j6-QnG%>yxoqdCI#?rIyGa6pYdR9>WYC+l4_!D5eS00qeRC;TfKRon+Wu?&D5e8WQ06)yuJ4VG^s^fVe z1O9y}fyZhAcBuo*36jL}GoRM8u5K<`-4rLyFD~E2xH#H6Wm<3Gcn z{2I*6sN?UW@I|Q}_z> z4L;r&VfE&zT-n29zHQs_&LdN}=e9?$)YnsSYi(r4-4imOmjmaJ{{Th&>zC9+ z$j&mv`gh=Z)utwnLgwvc+(btQpktrVVyF|4b}0b*irhATF$FOrXC&mBr*UU0^cWrZ z=AFo~mSfrhB;fscsiAerL>b4gADvk8Fb5zHeze%#QHa6%(zsZMdJOMf$F_JZf5MpY zh}ikcUjwH=52xW)njxPxNYAZMxGp$O2T!e4#bUhs8yQ`jy*i)%wPMX@ZzwV!m3toN z=qp9CS&N+DVya5Zw+aq@X_9i+Hp3*S;;W&WAVf@8>0K0x3FCkU1yXx(Pr0ZSjOcH! zAaMEV$Ths)DxYXZC9nrm>FrqaTMShpY*hOicVy~~sXR?_BXPTq22Vjl_=@F*2Vuvh zVy(Fc`P8NbD@E9@!hK%qOmc7zK_Jh9q>y3}>P~4nI3BdOJP@O>tj1IZq*KzO^%U{1|zhC(EE(`^`>vqpIP%aQNRSA7t9;Ubnc47nWRoPmR$^&w+A+^5Pt z>s?V#++g41OpivrRV*Xjx1I<+2?zOAkL>%HhUubp9Rb=!Nl6n4ClM@cOCUK5o<5*f z_LjbJ0Aw)7r%J+9l`*@gsjkPv!roW&E>w^R0o34+Uus38rM9v}0c0Daem_2|PoQq{LW&N^ox`X8wNwc6Q7Yd-r`eBfjT;PX-4S$Xlcop-@J#@@Ayb}b$S;!~%I z-suCF<=R;`^YaX2B=7+}M>Xe%Ns{(yX49uw_Wu9@^y||ZU^|gs*Wy19&21@JVrGdz zC#OPx#=K+1T4txFH}<82xPTp^P1)E}$U8+?t-A-erx)mm;2H z`HMH=J*rtyXVP|&8cQF%TUU}heCO->&BYZ zwbr3+Zj2s584Op`JXa{NobJfmj=l3z&d4_6u16%rSDc*mAIiDSM&oGQ&DY=j{(`i1 zr@M~P{>Qh>+w$({PrfV8B1`>4P*|n{Hbux@gtj>(o-yf7A+dD{SQj!lD!A$C>0WWD zhXzQa2j*|R&H!&??km)^gV!KGmu+k~n*{ zm6k_w^7Fv;825b&;jX8DoxncdI#OY#4UB z&YYuiE7-Ivh+9T+BLe#vpl|Q(>OJx@J8_EfJv!l@-K?DR8~6Pythc{wH-%ZYq=RWE zpyR*L{*;+;YjZa0@ilw5)3v#bHxWqQRm*dX^bC5HJRfT5{3YYPYr%JoV|M%D18XVy zm6Y_s`f{i6tW83~Ene9+4=VB>lRcl%lj=uK*!*jn`%?t|&ejo$jNxDSlj-U`sWozL z)4BGZi}4RiyOt}fNjEzY!TEOQuRP;F&{meEGgrcrAfhFr!?)@hXi{(dhBF;`5#rQsjAt@{{Sr%{$u)Anc3? 
zohxbfa*Rn3U^-L{p=s<7?#hJwlQ2AY1b>ZpODHvAKyH<#k)J$~Nk2E7$inMqd&^Idz%Ap%>8f_OcL>r`ig+BniUi3~so+qMApJ?priPfesLFYAs!3deg`ZP}f0&Y8(Qxb5#u z4lY>5xxh@w+!EcrD-z-pf-+8d`kL<`u{Tn)GG~kd*m_n)-H{I8E);tEo-y^J;bVZB zC+I*Ts+1sw1Dfg{)Z#+jMt@48EmtJ5=sEmoBF8a1G84(H*<@^Rc@?#L9tl67tXU!_ zJX5(P2`4MosVi)>F}dU~5Z| zYc;Od)Eb?6#X<&Z zW#W-!$}+V&E^sO+#}%OxW1jTD)3A*|&vJj2cGj`Qz!=~FGg#U^k&)3EaujkuTI|xy zM7db-K*;0yR`FIvq*;R9Lj5?a3>qgJSn?{QD(E(VRO21-{uQXPnG_snjA!1R%~Bed za)I+4`yOiKch0>IKA+)KngCQ0mCxf*{h5YBuw(7l^~EcU*cyv~0l@>-qmuI8M^hvU z2psJ=Kc^Y2y{@Ng15$0`rsW&fHx`O`xM&+E8;0fQ>HZbB{iN~4a@O}Lowys3kG=*g zg#(-lok872#&%18YWP`g2nYL;ALr1<`S5=l?6mu!VgnkUk&tafKgi~cHzT*d{<9la>T5* z^178H8@_C3({Mk|qDti*dqTHQ{gta>Bq-6OjnTpW6YtztlX&yPms$%O{Vf(mkd>Y% zK=HCarbaN`!R{;DpqA5I(@a;GX_tV*jN`b)QoFHBm=X(+CWmNUt=9wvd)v<~SZWFKZKtOq0!pKszEfs!%BT6YwY z<+js3joOpKx9MK@;i;`JZS94qCgwszoc>*USCx2^MiEQC70go&@sZdbK>nXmR(u<+ zOQ%S#?}H158Rr=FALoh;qwc#OP}^C>a|qcx;xx+!Z0#pJbBwninaxQIv0GTUxm}l6vmP`1JM_$*Eh3qDy&H#EXDQ&PezD zc&#X-DA~z%LyTt`Irr=ZCW=Q|`PvxuIqOw!*;d_)bOWzoKhNP&q`_4erteObXqe`R z#>Xs1dU4PA)@7btvs+tulGftwu?c)JJvO&fUqQ#M==yS-q>*EZ2*F}HV!Ufu(Gu+l z0|%!b)x&BQdZaKLSzO#(u5gY>+w?!!>_gr0cQywi7I@I6F>ydSb05?~(~8 zXdLrj4(V5;K)5s6D_P0cA&<@YbT!-$iI?(3R!2L401=LRcckgFB$Ckg&k*>nY#}By zy_2x+dsoia`uB=;Ek&l5^}%MsK@ZBHf3$j^f6}NrF@rf;~TB` zXJVxP026W7IIg-s984kvIeeZ;I3vH~QsB)MM9+T<6y$lJFwcIJ+p8-}cPtPAw}#{8 z$GPL5OxJ-oj;~J%F64273H)=6_O7Q%@ywSt&uewG`Iy@0JCvS3txqG|Ig>4Tg5~d| z`xM6rk&_qz3=X}r4n1>TL#V>S0`A!ge=*j*PsFzKM`0YN{d}wyHlD|f`te>_sIDGa z3wGF7JO&(`emVB7B211qS(REPmA+BQ1L<6VQe~0<0KRyxi&cH)VO~kxe>498>#Vz& zyor3F)L?#6nsCXJcpZtJZ4I8^w&3 zyK%X8=aafrj@dwR!!-46?d`gZw~$O06Bl zu>6FPj^l$!l_s}6A^aBrJEFS=Fi0nkr=}^oY!F*&BG%FT&n$!tIu`GYeR|gh+J)4( zjiz^A%edmb3&JpYQ~A;WUASNNX&S+%^hdbaOl{y;_hbMvPpw;f(g$8o_*XIZj3yZH zdBr1K)f8zyKQrTH*t^aKposSfk#~TtI4Q^ z>ew~Go8?i@aY{80?wo%*Lb2&EYNVbI<65xl5-}MciR1qO)m~q1cFp$)EP8u>6}@e7 zBVapV4te#W#I$;?rl|@jKYhPX&bA}gOCAUxO7h6=iuE}nuScj9{x51~PAK)5H9URt zGCk_y)H$GtnVH%%Sczj?4jU=IC<)BI{!E)|;vwyt`U z$*xi>u*c^-b``4(+4;MQT+EjQb$~0UOp49Au#QoXIW-J7ZJci4R-{raoF_h}txD!i zG;nwNGcH;&kLz5$#kM_DA~Zvo(JPt zk8ZA>j80qIJSeW(?$O{oSa;(+jbX=bQf;g^7bNHDoKrP1tz%dtZ1aJWpT~;Cy9*?l zz{Wu%u07#dsT3>^Q@{qQ8-cYglrkZBS$OO^f%ux{XD=I-JpTZn zdemW)xQ^Y8R=I@?YE+Wk=QXUg*bKig{{Z@_!r{k8KaC9$UP!<-5lY8V7DdyKL;6$2 z;R6Lv;Z%xKcAr`~m>PopHkAa8)d9Fu{xv{?ed)L(x8+AO0{;M*2|Uv7&OkkDKXLW^ z>38~%%8P+?i5oZ>{Aw)BfUX!1YOW<=&mx!2d~n24xF7$~_|-|PC^)DU7#;>{;vvN% z%Md0=9jk8I9quwa=la!HU=hTRjF#u{t(!1`*LHazVzfwT>GYt`tIT-XKhA5du+@Vd ztb^&1T<!M$&V}iC!Rs$^Q>unKXNh^ zbH`(!(AQaQtV|Sboyh+HcsKrp4QQsa8;KM*)<6Cxqf(DBeJbK3g1?s^p{lalIt+CB ziuM)1)7*akDE9vVz=!kAOZHC>U5pQqpL$mV#RLwgBxHKjh&lRKuFK%dE1b2I>yz*P z70+IHJ5jbI?B2ehXY~|a0mhCK>S@TJhTPMN`?m=)Qk~TG+S97H8^BdIRVB7Pxnt86YE{Ao}VPL{fUBTiCx>3zy*#001Te_>TBfB8h9q_!=50XDU%kL>UQ~k5u4^g z&tTh=KH|SZmd-dM0f%mD#r`k&ZY@v4uRg%fY(#~}3=o~He*!_k^x}(HRO)j(9}vl9 zJ?@h=pg5MqosGr{pO}AB^shQm`{Oh~i5H0zr%vL%Lh@Fzou-6>+_A~dM{lpwp{9zOI?X3rWw%&kZGapx@6SJ#c6x7!2#L|jF-qZN7{M#Z zI6j0NSBKl*l0wLKJ8_=J{{Yurg^r?QOt0|##~r;$stCyX2TGa=%E25(0^vZ6Zanja z>*PmQ*%~H6Oz`=9Vf*T<@m5wn&;*VaFd@^6wCM zn%Xey&~_u5<*qziEKoF1zwaA>dvnL>Tm|H}H!bD-%ttxxlj;2{SjV}Xs&+K)uh!~w zDPmjP^NOI36$Qp~^BDSu2%RMhgjy8r{u^glj1_2z5 zVAb5sMo&|Ooqp4G0E!UB#y~u79CPeydGwt@r!5pDD`Ome+;DMS&YPv!Y0^bC@mV2= z1D)S9cK}qEdi~V$vswj)J;!Jrsb8^5mve`k!}_!h9Fs)K8}6JDpL*0feZmhcBkl{F zjz&FBI#S>GuI@{WH1BaL3}HTOeNRD4Z+qh1QdL;77fSOK1$eJU9|IdK9m4$z-B%n9k( zQn>jVJqJ*^w}(lyjieHVLga&zIO)$mxvwGAF0$;|-PHYkfBkj3H3evew2i?25snY2 z9)h@iMshwyJpQzj9HpU-NS5o&Bkqh<3-|LR-g(PoHKC1>li!-oy5uT%9GbHiu<1r^ zoDMjrS;^%*a^osEVd^P<^aXuJDl26s1|8OcJMXSDu)^e#X>f^1~m*2le8+m@Zq$Ms5oz!5#C`rI$}bb?()A9FOp* 
zMbDXB^p|;Wkms=cKl=4<8FB}uO^xn#a7q+zJb{Xx@0b!n;Bk)C&aA|I-RY*_6Ytul z#a!q<*v*Ag)bfAAw4j`x`Tqdx*CiC^IIXBA7zBQl0oK7f$KR!H!+5Fz-Gf{-bLRvx z^a8C5)lwHda&UX&tphusx8wM_defu6F2HAz$J6kxAuip{0l~#-+RidMobgKHvFNr_ z76Y2-p<%QP@$Xz_pCN1>hx*q@rGOmc^`&wwZzQ`q9<^z2)iJ#L;<>-HHZnoRcr`n89$$BF<~ecOuekNDN>tdahwo=U=kVsc>$phB z!C4Sv2h%+MRmfgi?=2*KPI&Yn@jl}}#_*Xx508_xO_fWI)S>;o@Ham}| z5B~sOu%)zCP}txSrj>+o{zl`^0M&PhtMlKjbTZrR$~gO`vu-2#cMfq>jM;^A9^N_T zu*!b;=jmM}*<=iRRxI)1w~mLcXhR_}_cX~DAUAPVcAhs5Dy+&uBpwY~C7AcJF5Cmo zf2CnfA!clG=~{Q2mCyHU5y{O4ZGmjy6ZNY5xZG*XdY?*Y!QlOBNF_ou&2&~v8!CxF zE6)b7p-^2zbj@`ZQ7n;#U^)(Sj@6_$0fTur~^QEW_IL;`?6=YaRw36XZqj6C+rO5lMxPP;T{#5M1gn`^s zN-i_fmDWVzxIX-UI%CSjC?kPV0vOnI=AmUOaz#keDFhA)H0zlZswy1q9Q|nl|I+yx zU;u)xk7gLFky}Vkaga|@RU(y#8;SNcv1K7LC^!ST6-vdLQzHDRyxC0=tC!hyErfNe2!r&fM zaDBh})hx7Ek~szm!6V!GR8h%1ZHD9%jPd~dGg{$QxIGktIp^@G+R>SdA;<@}UVD*J zQEK1KZL7QS_o-T0V$N{fpVFOfj1&FtgmlRN02-EM^HG^`&!?!V80%}}!B!yWw>8jP zYxj}iR$bWa-~E5_u5L>iEdntr;y?)7=tg<^RLc=kFnM8~I^a@v8QZ3fGQc~SkRJsQL)J6zhDn4{jU!epPI9M^;2YmV2le8k`sIsX7; z<$p@#l}6r&qeh08XM_04!hJ7Wy|9ryvuz`7QInJh9s2Qu^sXc@-Yq4;hU1v?(3GLajMWRr{x`w{Z~0=|B`J0B`OP~AN}t7yBRnzJ&lwAW@U7I(*% z1K4)OGfUKEfy!rY?2dbTR=Z3maOl`_H!BXl0ayL{YJ=74b;0v*MIvR4x?#@jcRGKgr-^)41 zU9tvy;B~EN^cJ`(FK(ViAY>=Xpy#I*&_!I#d@ZnrCHbj#{yXZTz_}@;=Hie)i_-)HH}p zxyv?sWPc-B%JwNqY>x8E($*i|g~J}Xbu~T)Lcfz{ky>Zne(o zDIAhA$})!?f&C44F;C}i{{RZ6WjV`Ka?QAvQV%?l-l62SGuDvqX~ipK^N;CT z@y--h3}*mhwO~?5G?_@(Nlq|09qLt)%7S`TG+bb|d8^S!p4{WFr6P+^lby!83m?22 zr~d$2v7(G9{cC2+s5tyTrAW@#PdNbguC^sRIR_@WTM>Y{>s?GO`>+7P?^49YWdoj= z>DsMFAq(s|>D%j7*-Lci{{XE`P2l0jew89NBV!V(CsDu$`TiB7Z*_soasGOJYZYbw zEcN4z@&}-+F-d@|N$0OY{(96jCXU+9@O01TT?`jl9GdfcYrJSG@et()=0Ez~ z>(9-m$r(t%exHcsSGr9Ql`utc8orz4xpiNcIUdLS^{O?5x*kMfcqMinvIwhk?YV$C z+^z17nwt7|Ruo#M<*yrApGGhn_ z@ua~6H1N3~eJayNHe=eR#E~HzfgZKFZ#R{aUm4F8iyD51{A-}K)8)STE!b`&BX=3C zV}5{Mx4e;~>`3emKbRu3^%_T&46sr?QsqO{eS@wwO^_JXnfaB1AL zuys;)k}uV# z*4=1Snt4;K$AY+hEf8|x}>|WkhJg8O}2e?1sNM*TWiO{ecA3WosKj-oK)G(_I zgWuM+o-)eJ{O7sp_zvcaT|MN+;nt?(A%fIP8tz=SIK^m3rwlV3?s2sH5A)i#ZLGG8 zD6X$c(B5`eVUg5UZbR7Qpwez9%0!2Og~zRWMvLKKt>#R;3~dML$^1=q+FpeWAUm%$ z(F8;oQNXFyobs#D6zwb`w*!I2XandtrevR#cIs)a;B@Uw!&VYWw2GjGx{p^z|O= zUXYTNMq$QJTEx{<-Z^C-IrQ#WdVPA<@i3^ak0^4>2mUL@+zytWe{btL)@fDUN4Qq=UbJtxDd3%OAs zX$}JejF0}mwRW@GK!an99=pGw{{R~K-aS(G)x!^zjuarmfIIt~^)=b)J}5E>*5s3( zGF#L9^HV6BLZaB~b$Kr>*EVq%khVDJ1##My_OBFfiEbH`4x`iQUYn=fTiz(gAo2KC z$yjcliU zag`^Jmlzo8KPu+5%_cv!#UzdvH*Ef2=ZeuYCl2PngS;ti3h$L!h&cfAE7+jZ5dn0J zE_#4HD?doOwY9cWHr%=SN^_MR`1Gymd_!jyp_a^vnMO%HFgoY?aniDt*|M@$M;))b zT0nNNNBWMRQ(gnAzxq}g$=nG8*YoNAc&|Y5Pl>+O`_GaS&jaQgNh1XE4u4AWE31W) zW^9gmz#orqdeSJyq=I77%&`NS(h-Q`UicWrWY{6Ne>Wd4a1Xt1vgd)?oQa2XGDQTQ z^$<*RT0U1kwHqKEvq>`A7Tb(xsH?XV+xc%L$T`LVTx9+oDwryR-kg^2s7#S4A~oty zb4*4501jD$9MigFwgUcnu8MdTG$*GY=ReM^#}qb}(WG1f_p2*ay^SEiY_a}b=pC$% zZu)dh*gY#JOtpKfgEIrk=b_ zsQf7uSsPB!KMK;b;fDtw;Z!10NaOIWD|n7~Zon8AKJ(yp^HUf}o7N{}r1h^jtr zLEsPb(w_~=qkye~a!xz`HP1S-$YgSMvF>;T`*FbiYUFZ~;2hw0>T&%jXe@U2H@iR? 
z>H#=B|=1ks^42Iv7C&9 z)}n%a-2Szvfa_hccSjVLD?D>rwUX9Mpup|!JwL*^KvV4l`>Y3F%DsDDuoFV+2R**O z{d)0>rD!BM^*wuf)gz)V=zaPWk~atM0zWV7SvM>5XKpwnKBl@E1b21>w*(Ho@;lZW zu{%-z@Eqg0=xUKiC3wew&Dd5woP^>*i^G7S56!J`c?CTf$db3VDtWXt6=j&5cs3XN{3^K<)$3e@e5Q00HS*AT}FTz9`X|iz)1VzgnR8EXlS-4fs_?atZ+RoMc`$>7um-H(1M7>Zkn=}WktF;Z+q&Y-Eo4{@K@tpHUd9)_4$idRy-d8o5e1fkESN4NM?dz3!q03TsY zY%2xseJKdfS}q6w)cJ-K!TEUnYfd(3_T_l|zxvfmB;*muttjJd!x_)l{{XF4ZjH+= zK?Vb4eSgpNu96GA>y&K$YnIBG)i#Abx##n(dq{&|gO2A1y-}h%L}Y)y0sjC#wX1Jt zlCI?A>CfnYrDj`NhB*L(+#ly#){!)L2c~;-MaxLL1*ObjYyq?R{{Sl0hD-QcVFiES zSgIlCkM19)d;3Nv{+yauDBHETotI;bgTd#$Y{wjpkCXsAcK-nD)9h`D zMIdBjsp@N^w$sZIk&=4~hQP;)%*f%GZaoHn!nzwRCP_#NwbSX^13ns2f$3e%j++g- zjjMtxFLEU`j&Duy>c*kx0Q*;EX=565v;o?zMGz;a08Od&IjJa%l z)w)KKWqIW(7zfg^+IDV1yMTl ztW|+i>N+37y%Wm>{Wz|E_e(Mz&||ONJu1^MoG+?5Wedm6lULw)`{R!FUTa&p7>0>} z>A=l3VV%C}^Yq8{redy*NrklOvB(`cBl%TpdG2i7F7Ms*h9kGXHG0QS^9X6P>5kRE zCZ7zD>?`u)6kOMpy-Xjqi=z~1pTo4{jHu^7=l=k$PiW;vV;&9&;}`%R)~m^4`h31> zlJV#6`T_kZ=HWrc?ZcD!{{Wx!lT>G_-1JQgTzC~@Nc_fJmvVXmzyNw?yF~E^nQ+j^ z<{gL_dJ;#crFd1Hy!A> zNYTjd;=Yy(R+VJ=w}3O)V+W6Z_0HVthB!^TIrCVv0uBZngOAFv=hWnv3lGW_K|Jxt zTFtzgIineDu>-GBj+m!nB{B<{*fB!cE(tmB-|)>~%sI&ow%x22W1(gcc-<$m>X| zR|ATvA@r-VjiUpM)KSHg%`^TF`=dObzy7mbMXU*KnI{LWXDL{KJb#5UTg6q& zH%^qrv8{J=wh~F_9qWU?x_ha9&WDbSf2BV2QZ zmFd>Dg5DmS(&)(o1j!(DYNCPuqu{`>7ibFc9DOfn%Nnb(#0N1XP z(&KRjNj(lf&v9HWvye|bf%<#a^{A0wg2TB79Ot%cDIqj^6`ag|Q`6SEi+MLL4RE$o zs`18Gt`DX-{Hv?7oUrHCw52OEj;u=;uz84k*N*t7OEJoB5OCjn>+4?NKy~`pA+Krc zHM_3d=DOc=8gl4(m9(yr$r(7tNc<~4Suv1GU;+D;}q@syYhS zn0%5j80U)5WD47dB-Qz`)N@o_Ngnm0MBJ-*Rw!AIBdu}M$_$LX%^aO~JX`c84mlXCtV+sjU<(VkSmx)o2i;woq#Pe1HGn$K(BY z-}^e}JYV;o`#u3D6oT&v5ckxsRsmQ-Eo8ts=3vqHy5+cM5fk$nD$@ z#eg(~eRT_EWTLYM*$}@A%DTCTjL)YnYD)~QxoJEy92<69y_`E~vXMY$^v5G6T+B%G zs2cIcU(p+fgoGJd*ed1|ChI9v1&14$N<+e3)+zkQTDvXuABN4)b=~s89OchA^3oI3yo)rJFt{!7rH(9%2<#bJF8kKFJ)u~&(e(x*{aQo+5tRHB1 zl81hAR8~BiHzu?Qz5Kgt{EO`abxHKq^yvaKFJ;0ELZl$x>)Yp=$LYO6FmH~2 zS113^8hswVN`oi;uUz-On+JXauoFrl!pwF;J)*os%X^{H`zhE}vf0Ce;((cn=)+R7 z&6G`x{pem2soLG=T!4lGKrwhNl$vV4Hr$<}x|)Y>iB-Yczqmn|NUbgZuQb!?u(7lP zNaKY&o6g&K+^tI4A6$ob*BVf8cTy64gN8&MA7*q1Xe}MqTE8u^bgIg4Z6DRb*8rCR{~yY90AMEW~IN#7gELrgzL|> z@^*!MD=HUzj9D{nzB#t|Nvhv^iQt2=*|g+y_mkIaf22tu60I^utFHh8M2l8&$*AAu z>Nh3)#@rGX1=YQDXX<{uIt(*DYNf5zs1m9yTPsMr(DU|_AA5rBh3l`mlR^!N2-#lTbo#i76(}z|>9tedk4!b^ zWB8ANt>`a3uO8kQzIU)%mwKA7Z?hr{fnudST&ChP}e{)@ou$1jyA`iFvfAR>Fp?`O)l9P);|6bcDv_D#5($uzqLE_`|!3Mapt^r(mnl@)|C zFdkAl8dfjQ*%URAKn+0V>4X}N&^ec1gOB=wfc0OZAxubP7Y?lfNQ{Ch3OT~x+WgRD znT#;SCzVNkO03pBtZL9u2FS4MQMTZo-cth7h+!0rOFRbV2nW- zXoPb;`R7MY@y z2re;R$ia9)BASH&eSwrP{&=)NHE!o$8~^P7Wpl9fx?W$hy#IwO-E`|akh%DCKoGeE zoPoB8=+8@doq>h}< z!7o3WG)ijve1X=12%{f1aX09^`xZO>r*A?oW6)L|=iXOq>+V|I>J#&x+2@Zxf8j_c z{d-_;<;K&ndmAEm*M^vpF!lSEF`@US$=>=$Q9E4Zmxkr{Kb%bhT5vsTiRfPOj|JpS*)8^ae$ht6F|2gyK*25=QRU8{gb= z=X9l~pF%@x+?MS{+c`c|tBfQ#eGnDY>AyVLG}=0hnuk=T(TOo9XD`@E7UP>`_6wfA zm-1SsPE{&{OBgdb;7oNr0XEqr)(fG}wN!IG00Fz60Xf4pu8!~@2?1MzzS0N**rvW! 
zxut1}UmLyHOMvgQmAIoa=Por|swPnzEu4Ns7rb8RQPEHJ)yN@D33D~GSN<`5`KPMQ zyG^X8$L}h&LYVQ6n91jlA^_$Jtbd@0sfVn6Pw4F&?g4P&=iux7oJX9MnMI z%|E>Kv2G~*t9a8`y@L$l%jC~|D>hgC*2*txtI>^nkf%kYKt`^Z;3 zqWQCm$B2e+kj%sCM{c2XGo8|u3AqTfWM69KO1?h9#b%iy@NsSIA1h~Ke1(F%8el+x z@%kzDtcD8<1z;e$gnrMVn_?*IX&p-s5%3tmz6=Peh-I={^6!@&cnYhLjQzWw%&;U8 z-7nvwYRKz~vZ;ZXXy|m(pNo%KJ!j)ALLb^uHBu!C$EXtXU~)QocOkX3KmaSL zKs%X2u*zrPI`!|t5yqp(ig^O}{<~f~XD_^R;#F2|kq90wp){{~u-z0?MgtcGT9dp4 zZgPxavAdl8ClJ9D`}oDWm+exatY3VRP|I(*1c{eYg_PPapL@$8#>KZg=~hjjm-XG= zd1VbexNsMmU=?pzC83)ArCoeMJ}FUtr@5-Uh3XCoVBmf=+xHSgDj7HJL|%qoI*PNq z#@^~1W72+PqNcM<#QZQ^JW_e;+&wSnqx5)aV9m&v8+1I_7Xwvv!T^iKzJ-P-UB%a|KW^v2gtIQy1da`zqaUz=hl!1wb}C-EHy> z{Ws(GZK8J&H{G_VkP;90=E#|!p*DG|6RBG^lf9Ir9bY6-{{W0?2E3%FVcodK*_OwA zFBeVMw~0x0U49zdOZ0hi1ZJ-r4hG&}4C{ppMYRblTf_#&t>NySD12SFRC-z?PnAE8 z0zU02B8{WhJSpZ4oQA6q0Pt{zh8p8E!Vj^Zn+Y?_P3`&#xTDLLR{)DyQ^S~c5YEqF z{NC|su`OK!0qfLKy+dAq^YnPk*Lyi?jDfQ9Kv-wlW|@vlYwWZ6hIAIK4O<)YgeuRM z1k+niOVk*{rCRgLn2j2k-Xc`hvSKQir zj;i?m@K07|lJSbc@D(D;f~aemjcUMG%aGqPNaakBi?vWH+|~A`=~-`)5*N8ixzx!8 zi(h-?oO9!mWxj2s1yN*=X%^x=w$`F_S}&}TidNgg$={W9@mpGrd^F8HBOBn7{+=r! zPF8WCMa0;Wmm;1Zvi;kfLiE(~bf$bPCnyH%{D;^IT(>%cY7vSX-q6}dazPv5Dn_M|9uQB{4v!w$Q{-Q% zo1OK2551UD!4we1jJ#&P5$yWuv!{y*uo6EsKE>i>~n~X%kJYuQF*Y<26gs=gwE%p^xW8kWBCrho9 z-OlB(s`8X9O4G+=#I@{o2I2fH;?_zQ%(lVI6lA8wAAGfvBEYQWbaYGnQlmYk;eySN zQoaO~e$J#$#$3(rbKU?TxU?3V-t&4X20{s>>Q5TlQ<<;r@1G*ScerbKuXUue=SSqD zo8%@Y=AMG`R4)2g^N&rh0Cl;$ucgzL-5%|WsMOBQE|K{6Y8iAq6UY?WQETJj!kY|K zE+5xWKK0Z-J^h06!nvG9D_w$*A2Y^Bt@+WSe|5|HWzBOwuQ7B8@N_o#;hlt3@9*spC)DVpUJW`;bTGK|jrYfL!7Nt~*nVy1 zjF~xe#(=KXZ2lHYn&t3ISrkMtvtvr>b$f(OgpUQUFml$NqqCfPuM2W$(qCy?&KJXuK-LNt1c;cyXq zOWDf0o*f@yrkvJqoFQ{N%$f67qPy+?UQd$@@9|lswvCBveW0x+Tcr=jl(;E#%9D;( z4;S9XiQR%j#FlGplLtGc!Cp)8wwA`Kbf!`Cmb36*86NWoi{24i-OwNoKqXB<%pXwz z7u;P-K^5e`dv;tr@+`GcY#^8bmz#mC4DUe>gbl#VU+5$B(-vzStqJ6@`g zx#vcduP~s&5EGi?{QV%*+m&i8&F9d2X*#atROz_Z88bp?c2SmNY;e*`b9Uwv2Z}EzF1myU^#fqYjK2? z$^oNr1&~|!-}p9K2(zNd>>-iP-EGG7ZgNHVXvR%0MGsls2{{L9ef3&}U1y$U(wpi( z8hT>L_y7AcVJVFev-Xzd$71W{{s}6+{1K(@_wr?WfhcodFHdFc9PHe^=u3_gaj^Q> zN!(Q3DS-xGSlQ}b-S9;e?V6dIsh`^}v|~tonG&SVX5x(LfFovFeZO~7+Ea(c4+)GU z2lt1{#BUG(yb*J;e+F;v)80=a6-9xMYku#z`L-q~%PN?Pg-eJ`0~??p{`WKcdYN2t zwWd56b$SH=*Xr!3rKbWR&`e?f4<3Wa&Ang{A>XYZxu4CH?7Up_LS)9V_f+|8MxkKX zw+KWub#j`2nafYPpRtJ~A^#cWl|^G#mTPqiJrjaKj}h=9cG%{aqm@0s60}1mT-QS6 zp|X6WP%EKloaVs-c`bcd{79Cp+n6f@Z!Wl6SqiH+;{9*cNspQ~0Y5kM0{6CL6ySM8 z!G0m632OB+>1DPjPrWXwl<-r)B{TB3xVYaSaEt2MF!ZbCokbxLUuCqY8?Vb4D4>_T zq|ZdmLQ3H>t@UG^|P(|Ctf|B}T`eLK3T&kybARz-l*ZM890A*26&-aqv*s_ro) z2%^6$=5Pq1StYzy^NAQz&FAWO+A)nJCmtpx#lwgwlU!IG-b}7Dr!A4`V*26*7((e( z*}xufTzqjviRV?Ec6>^O2~8}`QDZByCs*OK)lZ9&Xj}iZI#{s1NYR*+H>0+2a*ffwD{p;` za*PT-VTWz*h6Kq^cYF8j=r2P1po|FPndu+xbiwJQBmT31I?1lFY7}tI>k2@a$(%WV zdoXtdW=1bMM>fJY{?t7&Y`;!eCX`u9Reoq_32fQsQA)LQw~dc^>BifIGBzdDnu_XN zLl}67BRgp%{s;f4@5NV!nx}rg{aNdSMB=5YiKYqy{=y~shJu0MjvTUc`UVFRju7AA zNj`~N#+oc*sBaHD1`HdJmKB^kad*=x9h>=UuYJ8B`jJt2_t84h;#$v@GdyA<_UB?|$V41$5dWDsWQh@C7w)J8~ zu&!_BGmKTnY6LUk?ESkEtBj|lDs8y+z$3ZJb_(#4gb95?kIxybL_hqY#(>sAS&hmJ zx=SKf+r+8vIF=yaPAYp?#Y4|_Ewb-|9b%i~)2;?aLWP=~pQGAm)hlnr-4Q*O3f4+! 
zt7UnZDlRM4=s|v`OR=XPnObC#2A2H!<`~9V+cp1R)&tz2jojADQGuWe7#mK?L zq2)D>mT0f$zltZCP^D&^zK6td4JV^17n3FQBpVQG3DDfkYJE%?$3dofaFL0%1F`my=h z)3RA3_M)^6n7`vxb`(6!%7=lN&^w|bX|c=AGg9gMJtWQy_T;D_5lu9({>+e3|Nd#* z_Ck)NYSPVv6wE29iWU?^-g$hoX{dNm&;q1L3Js(Xc=jxcaS0J=c*$(H{pqi$OgR1z zbfAVOZyvA%JnYx*x6`igK?5#a<4ThUPuZ(rfbh`Cwu4K1MPoveb^h?@w!DP+g1j=W z9Ntr-UhT3`wPETX6zA$?8!qGrsb#~g9*L;a>9>w48i9mw@{vf=bMN`4RKK?;LwM*Q z_SbT&i@Zi(m{R=;GYVt2mK|+65wI_bF58Z>-JW7L6B=^A{R^HZb=MuF# zTu9z|lGXWD0LQ7hBUmE#C-`@%RczCKlGo7yZwN|%CV5z=^DMp`?>Z2959`>A7TYw> z0>sV&a#bXpUdPVG8fAL_2ds5-kNK&tzwqVZYxd*P4~|y==r3m9oA6)~Gdd^2pyvow zWs*j&uApyc^~V;05QMlt8>R=3$q_&-M0Y(n(Eh zvfQi`UEw>wS@ztSX5V=4Py##tVKQ4ohEEf04Fa{mHr=;`fcc5$fd_@We0x;p?r+%P z(6i}Bw;B~`xN*J_c~l;*Q<=8v6kP8lkiv;@U#(Tf{Jq+bFLF$bU;d<)B@z3d-c>J~ zlkGV(8SrhZwI6<2aPOC8{#i}@sut#T ze1rlvhmh9Zq&VBZSJ7p~UEB)zY)+VEIbfm0MUNky6CslZL500&ap8tkNfUuV^-3he zO739p0yb}7%%F^$^M5l)DnGb<|EG{L&@XD3xa7>95FS!3T$boSH5*hD$k4X)uQ*jv z#peLt!%0(XeKw=A7Z@lyAfNXX!;RnvyUTOHzUeWg;BS?jA}^+wou}I#{j_YV>n-Ipiy4NM~8@= zc@q_YFzBZ(Oj3k7TuLd_JfH z?&a3;xjUwM`gNa>FGTp~=U?# zJIvXbb$ThiW8@RClgNQ~w(<8>LW6UIweI?>j7c@Gbb7dSrepnk2tLPBL;86}m`5|z zH`#qI{my1NMHZyF5r_Su^zFB;QpIVmcX?soIZ4tI8{*J`V!E_-Cik-<>HIF4wVe5m zHqqg?4Bzd#5|VVnT^nECLHV$5KqA>^DroV2QTYf25@FBGZI9cv2w-nQeNXh+>O{h}{*woizCE+)XcxLnAvV6a*6vk%HbBkZT2g(iv zmyga{ankG(wVzE6gEaW1G9<+Jx@|7oPA&s%!+#qg0?YXpbRe#Aj{`!smsrlj8UEhe zex#tp26}~(A{eTKMlV-ZAN2eZ>?u(1ULXqnU_eJIgv)V#ul;1nhIKZ$!H<*+SJ$@| zE+altqUvV2Y^bgMWyT8(q&Wd|CF%cNHLU00K)uPdx8PyD6T%^!-MN>L0v#?s%l*~5 zPgy?yGc_!>=@J%e$@uq!(|v~@(R1)fc0FMFNP$LiUN*Ce=l%3;hXHxhw(!R1ZG4?V zeu!=8IBpv3(KK)cFzFK{(91>eWG6AoS;j=@`54q@@yN=`LK4Fmj!h6WV07fy#iQ~? zwJ-Loirx`cC#2s|3THFDQ$SYW`Mza?dF@298hEfa_8$bM5@BF^P+I*!uU`9h2&lKf zQDghOpE|Nsl62P17ou@`vc}EK67;9Cff+@YjA`=xT2Oo*;UWOWgC?^Wp0+9&D z3->#}0j%WIP>apZl8ob390^>#IGAC%CZE-4)nPa~@yy*C1PpAwq8bp+u2gATsor zXvD-qX0}5ylg;uPpAaRkMCd*}sT@$x`A^mmS(H?w$xZtPsR25#%)akEc90qrVY4z> zuJ*YF)ao5PX4v65bdZKv|9t4|%m)u8Z5z#|eqE%}3E9=SnXEPmw)QRqL%hSTtIIbF z0X(+=S|@U#vhj8ag7GlmxbjYm=2D;Y)D%-s27iLMz=Lldhv#FK<|z@I%U$okx#z!O z_7Ff>ch{Ji3U%KfkcDa~dE}ufV5z!)@A-bi$_MMIL-ruTl2+`1Q0MM0EqI^wPFbn^ zg&^Ixm-8Pv3EiPa@lTtakpvDuEu@(+W)w^~Gj2|mI1IdaLBahB@n>ga2*SjQ! zsj;?aZ1b9f;*C}VmV!|Y6`=QiEEBs{7vIS>YD*)0S)I9t>qQK~B&LbS>}Q#$MX(sz zQZvkZv9w`C@~JZ)dj_SOBrCVd7KtQd>6&snk<<{GbgG=sg{b2*w%-jcI?6%^6S zg$XUow=~m+>q@8?DUbx&D)0MCVwZjwZUs;=E1%qMHzpa6Gd*5ydFF$;)Z4zHhUD#h z32CQ>VNq)Ll} zMz_7GtPR6q%Yp&krma%BjqvP#>kqL9vB1eijw)3Rc03^$Qeza-1 z;E9V&KjE5bOqLk;+9-Fw>mlBwe2!4tUb#{D>Kivf(u9u_|OI(3EI3jD=68G@V|B7 zZ<#wRR*hOl7e8jXGL_EzLt84moT9OHqred)gB6KGc(mEl{CAFqd@H?veNw*DznZgq z+wlIzJMI_Kc&Ljek8h~7*x%d2^k=Nmza04y`9JM4FXX1bmi`Fw^;X{-gx1+$XS63Q z8~uZI?aP8O8|p-(r(#|XxCHU&SuXL|qb;b`aO#=ao^P4}i9k}g4CwMr(>?5r36BM= zzM~-ao)bTPZKhj#T^s6!hwk0)Jy)fBXM#LfAgh9<(fWoxZ_J+nWM9?bbfl5^J@E=_CEbmbznM2I5iLJ7e5!XA-%MAa9tzfPa z?UTpH!8mK_O#4tmpxmXgw36TEVO|%|;w}&wWs)$;>O5j~G@Rh`*ThnkPKhmC_+d3q zPwz&^u8PLKmoW&#yLVC<)-Y9KnK<3fI=Fqw^pI!KEsdGxYlbqgTuN;EW)vR?)gml4 zAy>Yr&j=T4FQ$%N=Bs9oGTu>dHu@$s(!raSwn(Z(Eo2y*!Q}lGo1drII9D7uNiNrt@8T+6vByNRGphF(n`$lpTR zF3y&%*t|G@(DXzCt^QL5ddJ7+$()$x2PlF0_{n!mm5v~Ux&hz_;-~ZKr~H40Kk=*y z3-J>$x)j-1lL#_bI43Rct9Bps#=7De^-Jp?@x`>yT%m>0YuyQ@(D)lewU%4CQY3Q(dLsP%X2tG za{5?EWn(Ml><`pz@ns62G1`8sFUV%smsT80_bB~#*#;Jqb6Y*j zhZC5Sm?O-TnNt+l_v8#2CsfPt_Pjd040k(lQ}t%chh6%e-dmY5=Or4}%x~t}hEj&2 zsu-_@OSI5;>r3#$on7PTDAg{+&A8if%_0i&GJy9oYRccKu@&Ar-O$4gsVJ}qE_eKLd=$pdL^h8KXpET$q2+zHby7F^^fm&M zeN}yUNO|600J>VO%<}<`kH5aMbpcI3{wMGq58SIh>1G?Q-$d_ROATTZn&1$-TzEgj zcu!+!Nn%@R$1+uuD_G?wH!gHl-)UkY5$6@206{M+pC?Lj`|IP#CpCsBs5Z? 
ztnf@U(;r<7KtH_fw~C9u}MFg%{q`QgqL8VALuE&h`4laZ3fRgNQ|lHZ!Ob; zNmyvo{~ZVd_(p!u=Ju_`t(a>PYIU_f9I$b(g#Z`3%noms8xtsQ?u0od$C%SPK6i#_ zo*BzL)OqS?1LkkFA?kJ*+ZMii8UWq)={HGulKbm>F<}pC0g!o<-anp_%uwo+3#;IJ zhh_uNb_u-L{iVRLKc+EzW$9w+=?C(iGEuoMfPMc6Vu-pCUq?&v`imk+QBs(o#nW!qcIQ) zo2=|;7?RvZf1PrqMxWhjuNg9Bud9`HXmyqjZZpvIt8^x&~a(D-287{Y{ICdb1TcTz>qhMx;2(%p~~I^kX^iNg;j^fB@be^R!XpyaVkl zHg$M8@#BFjC)kgpFYNd>pY zvhlID`~AG(QUB5^J{BH+x(lZl{zlb^7O{5u$PES}WOxr0Oyrj+k~bTFhDN*;_WJEJ zG2kw38NlyL$XCv7i4zc3=O3mIIpcDx^Zu0ON@xR;s?=^hsAr#*gMw3_AWiyO$!79jt!d_^M|*(1a@(wq(*d zgS(hQ1VSP)la1yN)V>q)`wY0N2Gr5?F zpT38R%9j5ci8!C6xSHDkbXyTygvFnlF8!i|iEemDpTEc-H~n!hnBO2V{0dMKj>kF7 zmzoRUyEnCp#zCZF}1XdA-8Djk>@ux8?l)no!)Q z)w#h+kkVHhBs^@^yJ43XmuYId$>9w9-VqCmN&sU23q4Lk)JDd zGX-_9-WcZ*QDvmCTvN4Y%Jb#ViF994>-WYG{G!@(slqB-UvgTnZI z6yR{7OlOr|2)QHUyqw2gECwtr<<)Sg4R^a?53GyGP*A1$VYrwvAg?Du|J?V+Q6&S* zC_KC7B&mwtUC!}3615x}1snCfdK4T?-XzC}cjDsk+zCcJoJcg1*kI|3;Y-@ z=Rk%!Bq-S3J3`vne%T;Xj1aM`7%SPw~ z$v-ek zy!c45C|2*;MoK+b?|Xi%vHJ&O^CVfX_frbStR>uU^bvQ&wr-KP*B7MHfYjIzf0f>- zsJ(sDQE$!#qmRX(|8D#?CH{1+jljJ>#dNdOzVvC7JX{^GbcdgAHSaxp+7OtPx=@ui zh&;lIz;TQFU|D5&DgQ8OphTQMv$x^U2eT!>dYmfrfd=3?1W8i-Wij?>arEOL25(1emnzYz+jL~B^hF!4`_yZTv$=06 zcKPL#@f4GF4g&nLg!WAXN#m?X!Nl%Em6hU*S3JD}cuXU%04{$#dk1ta=HJ|wl}hmv zn`yB%?B%!+mi8~wxgXu{q9Qhn44rm&F)LfKaKBd@d0(U9Yr{~P^9TmapK>WV_C}h4 z6N@Xsj0%7hz&Kc}1!E&DIb;()Z6b*QmRQ*(oyM|or%g2!Qx5jr zMzJGf&ktWx<=|K_98`WcXZidrrBfXe0)LmJ2WHYgb!@=s4tLl+1j+c1d1s!eT5=(w zWR{aZ@h;41&zgcdJo{90l@Im+P$gaB%fgp4BbW!OAto7M_t#f|;I~{!k&|7%Qlmo9 zdFH9-hM3xF4BbBHx%(#bH@zF+D?re#^AB~r`;Q#?hH(czvH4z+LdQw$N_X`@>>7ac zgR;~3eVf~D!7#h&EK<+^PF*U|+~98@q^^Cur?IolbU;I2wzES39hKy-p6YHIfu8%S z0j^D#sd9=v-kVF5SzD)Q%}YoIsrnK=Z?k4J)+aP?PjmEnP~>IEU0KM#X|~wg1a87q zjq-!RS8c`rkU|LfsBa%biPN}Mg|A^CQzmdW_H_E;+s5Qu)?!)&ZlA5lzKPX#;?Qx$ z8ef5}wM}B5zf{hHS~fux^Vh@gr#>m0#tu(YjhvxxWf)lOe&A{H6Jf}h3cC;2QQB}Fejfzl1>w)au-n`QBT1b;n8?snCfOU&t;le} zR8bMuP|nMY2+38?$}QT4%SZVHlh2gW>m-iI>;eZ|Oil!_Y^uvL=4FA;K`SMfHI=Dn z>aBw0P{xIf+ccE6=Wk+M$r^60#wnWo>-mR>N7Fgv6~hh)@6Mc|qw$5jLUNVowKR4Z zJt!)-EspsBwZ_ZJS$rK&Q{)%b6ByR{ECXr@EZr60sfGNT`rX8&%+*!)^G^0zD9_NV zhkwr7Fq}md7j9>myQtNAO2J-2`GyN9Ff7Q-KG@Q0Dj5$EJFm)V3*q+c3F6K7`rUjv zGdVExwmT@c{%|ga%J1HrE%YE6Z0PK4{JBHWf9h`mf&^Z8*A`U=StMQN9_@% z7T#VgHi7=GWRv%kXmDFQS_257ky4SRF__1NT>-=_#AM$FiJcnz>e?v{*6ODWza8|F z*l=qZ2k<9r5GPCisaU1>yn1hljqT+c;mK4rS6lJ~V)029fLbzF@hgDnS-|<%={fv& zWWg2S{m-{Hu>iu8z)+CIq{4LYN+T-WET$_h(KLMkXd0&&sk|jOL&qZ3QAu=$`ckXP#z$4~qDKoQ~s4HD9L} zL#&e992!)S3uEd~sUR(Fhp=hZCvjWc20ro2O^LpcDxsg=5|)`ggoEmNY$8#k)qxJN zKjxxfCd)L!7@hAn;qbF(%SB-1-~*6?51a2i2L`bpAJt_42K~{z43Myh?(MzK(I>?c zHw^7o{Vt{4L_stV2De-dn3HXAPdT3Qbz2XHqnzy0=JgStzso8v{Uru*#AOk?##`&A zu~f?1qL_WHHa)71ho04jeqJ{=TQ`-I@i|q`L=v%id1GzC=NU0U)OrtgAcn2s17`nSY( zQ2YnwX51L?h42lM#5(J$Hz=5=`bG*?`VV39&D8BvpSG6W$3O$0bK*nyH%g0^2ydb3 z?@dFi)!6_7r4gr&-2r+A3{fYV4_{C^TNYbCe+5E^NSSj2`pOq{U%7QD81Omr5a9Xf zFZDwwUlhxjKaSt@yc58CP%I`$)G-;P+)p9uE+UO4YnW^~g72pHK^y7Mm_e(i7&FMj9;;KBylI==Y z(|aLCD_s8)phEwgSF8(ODM0N6!JbiE<+#hw-^4UJu@0AeJ#}S@wAK|9m!qg#bZ=l6 z_)96N|2l%y{kHDNl;|($e_{@$`y$A37AY6+c-s0T^6=VC*P8Pf&J5i8WBZzTq6;0L zWvo$Qde7O$tJpak7PvwYG`Iw8Bc8^uH0Lz17%b%W&P{I zZ8Rr(38ahx9dl{D#_%ZO2 zm#rVp0-Lq70D_c9)nER_kxD;A=(ya>b;5%|7`OWO-04>U(#*XKAe=s=hSl*@jb0Ok zFA;4ot(9T#)+QGcWHuj-=pL?m<>7^s`D;`}SBN2O=2Ba-Y{^p~u_=Hc=I#B|0pY)S8PW7HE$KHU0?ijmq8&$vn?wdm<5 zsh%_awmNU44LtC<0?d|b=6J(vE`&iRqj6t(r;HO zSHkuVxv+{BT5qK47<-I{i5J?fiwTP{L}-pSdi{Ag+gf9 zB#5@(lkG0;8~p*X=p-*}sU&8ZaytL#5X+Ha8Ws_&_j*ti1qcEa(A;^FFaEmwsn(z3 z#5No*bZY2_P$@$1>- zO1pne9412vd!IFd3I}fKldFarnZr}h(LlG6jE{HYTLp)XD<26>N_dpxrOHYw_|k~x 
z+ZJc9eljf&_$+PK>__KrJZ|_k+5Yb}g7#-C&>LT;HI^qpe5CY8F{YVW{0Bvu*4WfL zZ+eMK=JCFdT-r`4&r-hQZdK9%k*;6J8HVK;21-{^v*lbR&lU%VFp%yNfRikrmr&5% zq@L-3B^|M7qJ{Wv&7RF!W`HqTF6gAd(T@}S^KZ&b(XHs&{NdW61S$$X6Tcl`l=@86 zLvuu4SwWTjL7vZ2SlBd24?y>H*F-K0wrvX! z;-ho7NEA$iJA;fTX@mS6vbAzMQbw-HxY&f)Mdm3MI!t@hf54m4D^m$qfJDcZPqcTK zsCnlix`Quc^8C0R816gW{2rsL+jm=-(|yR!a8Yv>~bH19u&1l1sYAa@8kr~WC;aqg;rQin{u?mbfPfAh*VzT2~ zj6-f_A|*aFL>m>|<_WceH)AmW9fyBpUYrF`528abuMO8yjqD#gDB2W)xH2uOFcl({ zrpNP(=UQUt`+93!D%vR(^;~lFp&62E*!u%dGhV3(3Bno>mn$uV2Hd4FT`ug@Q(W38 zlvMpnB<4|0rdf&GO1+wE zbdFE54!zR(1yRT{aS6I&(b&z8e4petoU7o)im7Ed9TSysZtD@|#?Lg=8i}I4g-C-A zt8^9Hi&uaev&Qz`#b2$byGozLYqOH9!L?q0Qj*<2x23Zm*G8NR3o}=#8O%lG3%aX< zH~Vcg-4Rm`j!ifJ4`@J_zn-SG5<{LTv?#p|K_Smte(=Nt$GvAPq!aJbrVsRGz_j$oL~%Ns2#qQLh2)qL2lTsAD%oCGDjr$s>2d) zl;M;df`9#V#6t-oaCy(+{{ZT#gpw62fHBQX1_u4PBh=&kX~HspbTN`M>p&FM<8!)V za=kd|^x~38gu~_M{Q6a=v0;YJ(s?6155lHcgm_)xb{Qk7{&Wb%lH27N13fqt_-#Rr z-LvwJoj)A?D^Kk4%^`~5V|gP#PWcsfI6lh@D-GcBpHMw%ipM=N+cFa$1#q|rBmV%^ zS&TxArVl6j&>~d`a!&wssJAxZ*bmIoM&ZV3 z+eUNx&;-%_{4?z>zy_~~hToSZhfe* z9Csh#TGsx2iE!w||bocF1SDG?b} z1EBu^Kgysfk`zgv`5k{6Xgh(!DH)_O$t&#I$2{ja;-kxCmON*n;*~?FEu8lNbox|J zA!flK9`pssWRXE2pKNBTNhDwrGBf^jO_gxxaRq-dRhhzrj2}@zTzgHxW7nlnd50PA z#Wmejanh<8GewDPS!CVELHSh{Z^xfnbc}iZYN>8;YLP4$)GdSFoX3$&eB%{JhF5Xe zR<4*pdFj=#&CD?OeW@ zCfq4KO?3$0D8@LXQ7+<{$v=fe%orR~3%hc|rvOwkzTD?Lf&O@;F&(l0R|-zk)A&eGTWRKML^rqu2k4lPG$21KR!uwk$yA2|6TrWJ4 zTqI|lo_(vP(&_wDuV`BXB|@0tYM2KBF4(;Ijg?Oq?@`S2H>03W9V`kMAV4p7Rp zp`?yl>q4)OO4$efSHydqdNwB5CtqE#=s`CJaYYv^AWvaXb&$pNKNk@Pj< zx{jh@Lu=%c6kxDBff>h4eqy~C9JKxuho0!p33vmZPGfHW08dZCxl7%WLJn}pJBB~4 zc6WC6)6DbRFjbK7t&}UCxZ@wKa{8U5(B?7+UJ8)IpQUu7nZjKSv9USG9ly`5XHL(; zIVvy#1KXe?weDjgUAX(q2j)d)-!INs4jZR`ZuOy)FfLgcx2ZTMq4YofVNZ+)UKoH6 zZ+eizj7V>kpeOOdNWLLK_k1R1LIofbVBgVwDF(jT%M2RZ4 z-{H@tb8B%Lwy);~XTTuzKhCyc9#VOrgPZ^{>s+?Nz~OrOnv_8Q0NyK(0Uc{t&8~tr znpjIQEJ*5be>&%H6L~H{8;)_;*mF`z6v_z<8$RTIl@dl2=0|1q7{L6-YGm8e!yg19 z2H%gEar#w)#PuC%yU8A-+nPg+4ry#67uZJt{xq>F5xI!PHci2?*cy&?AcyacpW{#k zgXFH#><9Ize9lWF04kv&h8XipfLnu#R~i4;@q18kNb6e`?XpGUv2EO=0(l3mYeIKm z`|;{3&eVNb@WWjDMT%NV<-DAYr2W;;W87mrSI{sJ(QVFg$K}WCUmpA~)FHRldY zB*$(K_lMo<^~b$^fo=lFoaa83gIbeN%DY37f6gnA)FSFXD($Y1JoM}M*DHJ%l{g%5 zYl3T2qMJO+Q29w1JxDpOPHgTtJf3U2)%J6^0CE0*{dL3MFFYQ#q)w87ZKsY8xS+c+ zB%TMhD!UR${c1TS^1&olgpxd*{yw!z>QT3MUtY$T(sPbK3accis~&&PUs_;Sd@(Fe z0sd74gN?xSs~e+`d-{K#wN_>$jqFbC9oe}!{*M6o#( z$!9fi#C7Jm`?>tJW{bIrJT5pM^5#t8`m*sDFbV8 zSBlz>T+=g)(A5u~l{P`gH9=dCTCjv?>(+xp@~}SjX&(eB;Qs(hppD+8--E|Kl!`Rb z$YOJWR|A3lD;P*|xA}If3odGGJDX8RPd=S~wW5+U42)yHt#e5^=qpCviRGB!tIKmByTl~O5V{n}+u1d%O0%#ax{an}cqM|z-vk&Jlzqo-=pBtsXNyqss(AB6yO z;h>s1rDn@70ddZ8{uRtkC}$vYKT7K^ZNuC&$;RQgf$PxvRt?>_h$~=jJn>9}IvD|7 z-nB4w>MK1Q433pxHWXlU{U{a73Q0LVJ$)+5?&AX|*NoLwKu`dBbDEiDVh+#4(9knd z7kKanJupRFea-<>`E>eLd?EIN=NPN8Z41f3^q^(cbd7LuaZ(lyFg~4x?eq49sng?Pcl%NM3@dNZ@jM_NFV3X8bAwz!Q!I zLh|#+G>CVeYKggEdsI$EO^^mE2R!DPEQyb;Sh9cLVyQcGj@2c*ZIpA?lLYlUn`Zl_ zw&8EOL9TMq!9Y!I!NxFZ4UVS8!w^X1^IbAX0&&wg_O3q6jP>;Vt9=}8Q%KPp0$rdJ zgPzq^c``zg*RbnRdFn_6^U!*8QOPC&KZOGcBq_@D?TV;e;NcX##`a z@#ts~ERy`)anh)?n_Ra{(^^dQ%~fegZcj7_w9=O&CZFbhwV${K80}IC6?@VHL1jJo z%{9q5&p78bL1rhZ;}qqS--AI#r6ZxOtv*H$0q8jV`k%m8KCBNXKmB^rwqgk5oYa(= z>>4HS-5JL059|D~Ug_b)R(4=t1gm!{IXERJohV5UTC_!~>juCaUXNb-KFE2{F46J9g-JsbXtO85o;_UnCrk zKMd#j)K@QOdJ(2zN{?O#_}5Y?o@ak+<+4bpJ6or?>s*!coSrye*LHvU)#{g#*a+S+ zC_bS^f0+KY!QI(JH$L(rVpK8iM^62_`c}qm9AxAyzc;5-?oafrSv_;@T~+gSB&uh7!0Y|kM~Vt z$vEA}{AydVC5Q5)P0{2YNySw#KT(1zXtA&p+?u9X?)g}bX)zOdXFRDG^ym3fY#8I8 zYPV{|pAEQ4r=?{=rAALqwWkQofxA^5sOxlVxoz$vof~T@3=e#- z{{X7K=kV67ny!-t%AA&7eTd+S{Qk9-k;XlLoqaR#qr@RR-7Z$)@aj)vz%`tP)zR*^ 
zXL6D&h`*Hbk_=;~v97?#B#h(KisUtgUVTXX{eKZ$6;`H{jxSMo;d%YfT@A8u-*bu%4I1vl?;90<$NG5z@8f8Rs=o z(?2azC7d3iXx8!@gdZvU)YvEKU3JB{kTS62n&Yl*UeaQt?w&`1c)u`pqOw|~|j@4xyRA;pVH7)1cf_dlh6?E-(0kRbT09wOY06d!1hDJX(1X3Ar zDn#X2k`F)it1WcoseVeR1oj!gtmC^ZSMK-v)v_ZI zfr2u;4!Ium2xZKV8-~aTMY_j$>*&}zl@$g9>0%T0MJPtPyx7)Y*n!- zQS)-m)Shu!2`F6l2fa#UAxH=F=7E+PLAxiMo|&lLUag)xbVuh0 zxTx{XS&4bg1f!|3ljRvT(c6%C2DvLHE61gDusIkx#X~noNo>Q9O5NNU2Om*f#jM=* zt=ObyT%V;$qAdo&&S|ePBc4r1oQ!6pk+Ga}+cXS@N5>+d-~HoN%*^ag(lAKpzr94w z@|>Q6ffIb7U>?2doU8#k>rV3t`LoALt1D+99MS>%$sUB$w+gBons)F8GtB^6Q;-Kv zDe6f-Lsb>|seH_iC>ee>{{TLo)76?LilTX#IC=>y*{TEpHeG)CijRoNFXv}*A?g5rKN+* z91j^oBaCb~=hXG>U9P<%f`p!cAJmHCE-qphODp`raxgK;JXdUYIAF9neM?WWjFORW z=Q(ZI#y?u)Z)J+z8xckT93F#(j+_x(HRXizJ3QpaA54JR_Fg)l zt#(IYvzU8=aPD%s=LZFkrhlDtxAzTqcM*{Msq<|gbOFu)>06h!*BK>n5q?$4VgM$& z3z$fhi67TE2lDA!S(&y?H0dT{ET9lLC)T-rL4J25X$*KHujN`&#rv{0xhDhYKl=4g z?Cit+UKDea)|=FWm$(NfB%Xg-&6+Wd_`x;PTw696k^t-UtXrt?(MO&8&u>9lG)YcE zQZfgoDNb2A zV0sKzy#Q-{&-a`3r7~m9Y;%pGlw&FnRUJOGz`KNN(|cx+ix5O)ofkNcHTk`HsP%cH zW=NE6Sl56(>UdZJ=BVPML3sWqXs6_{)o z9^BJdG;yo9T;z{oT>PMA;HM-NKD2p+0fI6+)v$7*heOQ@io|d;k9Z2b3hv98W>~$-7RPYWP>0AtG z%skZjfE=GssI8j`_DGDU{EMaM>YMxs2XIqEz5`;Y5f-Nb;dGn|jcy3J^USe))sN%lY9 z`XAySjc}Jz?I(Uf9dZ0WPf&YQ#OgCHP6IOFjP$J2C?jdl9qR1TgP%d`RAm5+sQf)E zWF}QDf%9}c15n7sVwI$95C9x=ROXj%3Fvs{n1SW!1!l^_a}MUDb}fU9RaYdQxS&N@ zKpf_?Wl~OSO_hnN&mK(#nXMi(nux^Thw`mc25?O$k}<_G7|m{W^sF61$Vv~($gbGK z+a0Sq=HDyQnq;+fd4$k3EyX%Haa|qdmpqQO&PxQ5xfsQ1=8jgHWwem?tBBd=yWa(T zI2!JUdE(t}?AYm6j3Y%rJllzJovK^qMFW<0&N~8W(OUdCwUo!G-Q2&mxfoyg6XQSk z$gVl!p%==o-rv>e}MM;M?8CVvqoEdxOXz_Xn{ZYf4q&Q%Jpk5S=+gSFv#zZrp0%Y>eiy=Tpz6 zQ7jCbm$>RE0~+*gZO64|`J@rIy=+}Z#RhpGka5zm+7hGY1CFMFT)BbKFg|FEGtNEn?C#HGzph=OCFi*V$EXMe3lhUX2vO4}X znURh&%^{EuKIrtt0dmvHjB}c<(=xDPKOgB$G7KCZO+xc$jwlf3nUn@6gW9t$E*+HP zH9U-=PxPv80Uq?pW^2lQ4K2GJDx)50gC2*PyA}mej2^XlI#L8^+rHNo(OXHJb6l#N8r-&%0Are# zYMIkp%Aktq?c@xewZ_FJNUoO7apmJVG>s9iPSxags_w@*r7{-dvGt}i0tqMUNTRHV zt^fwAx#N!2B(8haM|=V4NJMPJ1B23{bAUbSIUIEzshJ;_Y54EceUC=nAv0CoX7e{%v ztgoD${+Jw`bgwPf?xl|9(hNLl!m5%#Gm(?)UZbzxh^`{Fjq+fLnT~lS&Ii+s`MBBz&t2TbP>-yHFY>wjT%c5?^c{NF=4J)x?Lh=1gOzF07F^^GzDzUaZ95xg_ zPbVB7=|-fP((ZdrqZ_7derNnEI_RjpKmpqfcn2Lx?^doZ5lX^@VGk$i+Nnz@bIA;A zHpF7;jlNY?&ja-}mRO+*zj*Omw((+&yOM59 z!F;pR_iBGi&r`V+cXrJ!%gL~eX9^eesF*xEByh>Ka;kch>(6S{u@jP8%x=HJ0ILeY zAFtd_mCvW1WDSxAb5;+SS-x!V>r$i!oxRjCrqIAYxdN-an}+g!W8SD6RAb2`)S?wb zo}_x5(agY{mEo9-9tXLqv(3Df{w^u9uu`fpFf)(kRLhL9&vT!`p|B>&#?|NPRum#8 z9e^j22PU)9b;dKcqmpl!mFGAgfT_68|I+Z3v(i0l0Q*{{x$7VoL3cOTHNzL5L^7jgNGb~E6lIHOylKY+qOUWlj+*M zlUj|+lDi1>VpsnF9>M-qX?1h{y<^9b0RR&bW6Q`g zH)8>FgUxa*NjcBI71K_loOz?Ml$D*C9&#{y3W^ajg+~YStq3A3o=Y9{C2gZ0qCXeuR(5iptY8%iG%r)-P49H0b2=nq`Tl z0#3?sR4G2>R#3%Lmc=?!TC*vWx93%6hmI?#KiDf8fY%Z}Id73hewlCQR4$I2u~yH` z-QA=re;^0-t>ZhrnGQDUaTj*r@O#o|I!>Rc_=io?^>eb)OQ@1|91{_7xM$ZDt#ueV zBd34ythY_r2ZxW8J^1bQ^sO6qcHGLzy-ug&w!d$p!KV06MJ)xaM&fxPQMGrxqzq%O z+!g1b1J=H0jvYq#_lvkMu&<-Fo9_#Fs_x5L@lBkvUfDt-Zz!*oI0Ud~1cRPWx7Und z-TX$=w2PDdkEL5%#=I&x^LByQZ^#4iuQv}?QKq3zo!?%Dm1#y&a;ZBmhmh&>>slSs zUfD{@@sM$o&3#F$+v)x(_(vazL`s+1SdQu@Tyt+TnCF49{{TGX8`K}V8v}z}O|Qfs z2l&CF)4WNg!))cjGua}=BM(e{-^gduy?eu65D%c=ctb~>uQdH)VJiRIeY%`Gz)90|WWh#G4I(Ynsz8i^3F~?c?&T zxZO|R7#*wGIMcbZ8HNeuRLdw~iqA(NXE_xr5+9~P{V7!7vp zcDFoc=1(f#-@Nk35-)r);lJP;s%FxBD`}s%>z;g_*3Jfyeh%da>0HkjO7>3vnwzRK z>a1cTk~X&<^)tv08`89e*MXs25A7L;sWB(?H4(q?OM!;Iiw8L-C&%)s`qKVMeZ%%Y z-d}l+`N$*@fzN)F-!(zppF%q4-nV|wpx$8E>dLHoEy5K40LO+!P`%MF?2yjUZ-nwB zh=)(EMO1NAo3nO)dI`{)SGD~$Gnx6oIVAH)SVj&y)Dk|z4_x=E=0Hf#HPv$lTakCJ zJu5!qFjqd6XL~5fJ^85LmCs!N06JnYq(X-WjCBEZO3>?O(xx_THUKZ~F!jkIWkMO&h_#4?IyBi$vD0 
[GIT binary patch: base85-encoded binary payload omitted; no human-readable content]
zCKJiA(q4})4I5&Udr08Bss1ze!z-4`A$N)NoF8}e*VwY0Fy^1FY#}3>tctpq#N80Q zp9sUtIxpE8RoFfp*nuXa!AH3b0h6e!ycC=tMT+wi0}WgTWS|S{Ee#H2YPD+d8PbmG zxpOn#Sn*FZFT_f{nzf?wxv7!$pSsTUUr-FZp}i3=e;SP+R8kAEhPAY2|H0Hb?ke;M z4}fmhZ>F0Y8!|J`jd+8Z{`zzx%=6Ng9pH)l=Ob;w-Td$6r<8e_d5rDjOP3u?6`;JU$i6D-!w=0AXxF6l2$_kVgeaP_!Z*-&ZM_Q0oGQ_x>k~Uh6@~h%Yf3#U zN&3P15Qjhf&g>42ezy@8GXK=K(H}Bzej4$Ak!C8PEaeBKK;|p2-me4*_BqRvsIw?k z7ljMsl#)kq&)vD{J8)`8kLXIJdp>H&$xwKjpbC*ZYNtUKBo1h1uaq@>F{8&a!lBAq zmwUN1eYT_Kq%y~hd6TSMCw|>{nIesp8A5qXXMj_RhwQD0(%m|4VpbeuyNd%*30-gz zixWbyPU${ew=jFZPa!^Q@z-VV?_5ladxYHwFSyWME`tN<)TQFQT|XaX%6_`p<%z4u zf8;;&Fi!os+1h9FCTbGGRZ84?L)be>e~QwR$8+}q&GnBSe*FFXacxaH%WDX$ZTL2^ z9X*3s`D-71_t1@?C@!{8bluHQ>2--&U+9rRVCPn=II+t4)uFjT&5xW))WK~fz7wNx zOtvy&@y6#@z&}nA@*=(wckYg(vz4>O^BZYty3GpuZUmDM;R<;v+DCEfl~!k$;L}3w zavQ}hVgCu_n5rTv1D__(4dy=Dl3&^gG9`Dxge2K93zjARcvF_&VE8}U46Vz%;gd{q zd76@gbHJn3$DVv-*Jm32-7`sPaT{|P=jewW((SG5Ha?4O& zE1}==`vNuvdbi6(h%<+hK;}Ku1lIA4NFDt9qsf}BH4=k0N#Y$i%EWGHF{a%^ox}`l z)rm#Yp=73bpgXzO?mnh^^M~ub7rgUqpp(9X6eTe&B7uWogxv$p`T(Q1jK=%i=9h*$ zUTmwjOBF**U!7F>HR&IBQ6uxZdh6>S6PvyU0Sd1tpho;4Ti7t&ujhqi@}Qnq0Hv{l zeWoXEVv2uucJP4x9<#sfhapQT1#BrXLN#wnk+~}&-k*+0zgDFiRic(C^CDyfVh_N} z@AN|gLV2lP0bNpn2klSGIjiaY#Z9R~;IBa@CM-a8E^8ypK z{2kuBw+iRrB_pC~LcdPHv;bCSmomnk^*}CKL~r__Pf+N^`7jG-ULVm^%^19qhAGTN)y9`|al^|T8 z5RgjQC$-Oh=k7$O0{Mm;1PfsAnov46Z}To};I1An^ZexMbrQOG&N7kB__!mu{qcREP~TnNXXmMq&7w;%k`U2QN#p9RK7VI2Rc*{I+xMJLm19@&+_6`$ zpRCsnq`tFTk!jFqnq#Wow)dejX+7D#&lFW}=r9o;^HIJLN|3pk{<;gHwQ#?p*bET{ zgH$FxRt#AU&$lzprTio3?Q@;`kcCdhorknT2XPOKIL^-&xyST%{?_Y-+}P(dG+LVRuc#j6t%=#-^mFw7V&m0{M->*Kcy}^+;SwA*J}Vtjp9C}9xKG9t zOj_Y!Cp*d3Aj*rr9&;2=8g6iDF#F+S#|8i(9t8>H+9%EU3;L8B3%A#Xr)jzo3=w{o zca`lLz|_04Rddp-ZTh_nVJBw5YJbesUZI#pL0Kpd_ir=#e04!L8vwHuQ~bkuJCG{4M=oQq806{w}+v}_j6Ed`Tm~# zb^3k}(wOmS1N%ug;JpL;VLCr^w$r8ZySCiJjpL+9-WM1#rA-$B$o4Lkx$tdL^w*#y zb~Kqi1yfSQbA<*U9U1wsBr+c>+yZ)?-vfNZ2c6DAwx%0jT=a6a=L^Q6yym&nW7h*! 
zPWJY$Uy`)CY=0pjujYod9IY@%QxYP0ePqWZbFE1o`05DF-UYvprBn}*1kwdhvI!Bn z^}B4RWW-sT9m|d@u@65n2CG*Qb=<7Qjr+Prv`jg7b8!rO;H0$p=Y}JtWHD`v*4i{8RSKo4g~Vem!~_8*fh)!ChV?kb%h09B7~(q$_@mc*Vb~w@S2=G_1u2 z4~<)#ekEsiYr(CKFQI5`O7^c6-;!V<@r}79H}3gERMeQd^{9+{mt@_R36*K8i(31I z=aWHFaf;0MRf1h8oQhoTQWRlB#-o{RYqaiE?VqcX#v2CFNy{br(^w@sbZLspWJyo^ zC+TV8+f+Rxr4WI54=3KZEZom-6V4#-eqY``WQg>0KO;2%pWv) z7-Or}w-lW5W+jQwK!N@y7)x>m?wU{}6;&#t(YN8v&6v%^6XWa=?oXKkm@9Mw9`pmw zOF4OA4qD9O{4zVfaBto&J7Fa|Tozfxy;Ke-qDdIhk6VIXaUUH)vWVZi;K4e{8u}T* zIk*R9Vy*=PBFx?^y?WL^PpcaUv>r}Ja2x7_>-zR~tqy{a0hwFYpFxh?gs48;1o?>} z*Bn7!&w^T3pe+~#NMLX{6Y733euDlL8*D{|a~Oau_n_Xrfmtae3U9wolg7GAbL&&H zBQAV7*9s6kXWT#c)<#CFkcy8UW3MSg-72ysZL6By^T#-!ztm3>mey0pD32Y*bC$U_ zU-Zq)gRu5LDt?%H;@`A>|CeB=UFlt%m<;mE#KO!xxRdzFpmm`dIl`Zvr13$WXV3#d z(OEDZ5X1V%`p8)5ux7t-nER!OHMLPgnO;OQ)hFiiHZ|N82?bQyyH$YFa@`QnslEDi z)5zhmF?Ze~>G6#3bl5FY#t@Op*5AGQk3;FS(7LnTHJQ)KjC^O`RSL)u1yz~BsLJfA zdq4Cm63Go`KzRiEobOF>E@`%AyfI1_F7qdB)xRbs=~R4f0=cB^+W1Ux$tSg1C8L{|y$eUW4Xzj)N0%Po_lMY^jWRl0$=^ zV0yXawGA)R^+`9gowKA5M%xBu=$z&&00#Nfkvn$hiJ2<2B^vl~4glqbMupC(U;R`s8^aGkZFBGVd|qAS zRK%r?iBMCK{*$y)KikCDZ=uznV(!f2{nDJC_tj#eCS{Jc=PLlhkM=>hLLDMf;P4#% zXACW}pg9J~bl8AyK;}T(AIo}2zhc0%0KG;eN0~cEE|Mx~wpvy$E4%1|`+CN_e93N7 z9FCo{qM79ED~jsN*(E5h&Cni2i_DDeT73e-c&Kk5Px~#&Ey>9$7rKGX$@U5ulmVr; z>04Y3{foSfS7JQoK&1YW(!yJ^^FSU2)3kHqiNTp?mwmCn1-`ZX8L_#h5v$S3F0qjMb;LX`^X10Ld~odrL{(S8tf)(<^{W zF)^gjP2UKe5Jr}m8>-sy3YaQWu|qTs-m=)fqHIv!Y*dBBQh)H`ey$cpO>6hl)qxhU z*ch!T8k3oLAN9RWs7-kf!bfl83j=Xmoa6t&UVs9>y#k_Jy}1;Z8X0=T2FFVTFBjq0i_}`|n_)VI)4NN7tV53ykQA&u zbYez}#aNn~#WcOa5;#X~(=Dh{`%GxOhC1l&jI>(sN{tcXj-2Nh1727MyaW8g$f z)o>L6ZN!A?bep;?FKl(F>El3R^6f%MU`X5DT7R*-AGalY2Ti6fcNtR=aSScvD*)~l zfMw)Tp}g~xlGLGyPo!KLsXesM(@ar`v41Ov(DJj{?a1i0;Wz9)K#Tl)%_ z{!vX$&DZZqhG^#Rv{m;CP}OIBrju&`PtY5Z3`S>zqCF-gfNpt@4|B1f@Nu%mOPtE~ z;jX!7?$Tygy<7D(6t_3lNIo=UH`=w=vqVvfUHoQpSxubTr=Ke?Xxvl6L}Z9{6BdeT ztU>*kw6|a*`VZ1nUlK+lNj}2aMfy_GZnb_I-1oi1#8Gs4%+BUQNWF|_8*Yf;wwmF> zwQxnaQiy)K&vj7gfyc?apM`SjSfYAy{20^sa#0L1{?FdMcG+FVhUzq>wi=Ppd4P+N z$xr`8zJrjGqxrw17B3*{_KjU7-#@TgXD=<|5SgX|mZfE_0|J^>*7t{(dPdSTzL`)> zQYISjyT|;*`6ho6GuNoS-_@Dr2Sd;vUP8KpV<6_Co>2{g_yfwW1w3N7i@`)+&isOw z**yL-R4~=;R%X+@gAu2vWGGR@DJ2RFQO2`Z+32^n`I(4rW`ziQi!=E6`8My8F^d4V zkhHSpy|R^!=vXqw&A6DUnEO!MA%X*AI`2&AXil{`KVqcQczer)#(a1@(5=#dt9Y}} z`CS%(mf{2fKI+^UOK%kC*ZqAq`c3yy=C-5AnpoYR=@L2D#x?(cvL5)__;^2SlS5$5 zTkRJplp+JI?Zg9ln3m))?y17Y9C1^HQ!_KFQS@_X^*Asx?l!SyeM-SOiKF_-fu85a z;sV-~oh%>otwkeT24)NII(bx{@3j$LmKM;L51O&inr&R}=U-*j@#sS?FBx!dt*a^T z0Jod*snnw--C1SNmP*u}PvnP-s;#O`sG7R114bJyisLTW#>&9tX-D5bZ*3SEROCIM zMH`x`yuw21rGAlK%RYQ}LS=numsVEZ>lyPs$<{<7NOoY=@12X0`~KLl7_SpJFk;da z_6e6(^Wsw2-BJ0v=Gs4fUJ*yEaHL5r*`lVfdeI=H>WLpAIYGM9e<9C1dW$@~P1OXa zcdEbS@JCYh?=)V=RzlaWv&{R${?p?^{Z_#Yha&anPv6-@4faSLv#ltm1v-RP8iUaqfuHRc6F^d`a^CO#N7 zjQbL1q zn}n1IA92ir`hU6QB3F5leQLNsx-Al@m!@?UqUQ6Mq4kU{BeH?E48R@aJ86 z80CpP1g0}FZK+<^w34P(d3;Kfcf_&tX@+kC>~`qiHF?KF?fr~7-BqPODTuZHO zw;CSy?8=CYubgasrTKoYL*|laguG=;={=#AtOw)1{+CDZtPi+QdKX{P?q&HYGdbd0 z`p7ctz@CP!D~r>S`t+6jN`Sgd+{3E>^yw<5*$2^Q&3-#9yZPl7c_$Yf1|IP?!A_(@ zncH`T=Ye2^b<(Ig?eC2WX3l)H$>C3uUcSPYVx@&;ADc0{iCtwte~TW$TvPkgX;IRD z_K9N0OA7UiPRjKS^kXQ$xe@2XBUmQ{i0u}(&wgo-SDh(vyNTwKP7FC+B+f;?ZG17- z8-pP=@kC&>E-!sg+7r=n#C3U6Kz0&a|LaeqFF(l*!ro}SY0LR(z32Q@jCK)HU6`db zh`Yecp=W%(csY4mYVa$3)LNfVvc}_ z%}T|?XDO;WfZ?Yc{G@6P5yjh(^&DodI%e0C{cFZcO#bhyFp?PGgLaCyabJ_xgF%|x zpV6JZ8iQV!q*@x!Q`%W~j9KRZ`|$Z<@Ojl*`)&Byvb=W@qGlZW_r9kXWO4qJ3BAJ` zM^~@a?Nmtus)ZKUw-zqM?Xsy%V7y-!a^S?WPep=CN%5A{WC4f5bkey0w0$m-H~@S@ zCelai?OuA=Pnz@rrP3tYQ!75G5n5{kjoLfN&gfud3RiCs+VpNm2*iPbijheyllz52 
zJ5BXeTG{2y@I0?RG+qhNqkrQ9~3y*@(l#@3ENBIhvqLE#UHgd)CRt6KR7hOeCXUqPHDn4ylF z<%3z9nYuZhm605Dx+6NHCo?1f=Lr+ZCFYPJ*4X1NH|NitwCdbyj z4O+r$bRfeXH!FI7i5;)6;y&7U!V6~&#hA@Mc}lMU1?@(Ep&XjziQwRd$oY4e;$+xv z`+;X7*ZYBV({1=ro`zW~%P7c{AwrOcV$}s3slFf5@YSI2J-U?p`CYYKISd%_#DDdk zIaUsJX6l}tTz9$E?q_8y8GCa={>jkD^)cMhkx^`+4*r~KcP@1s5@RGBT#(JudoO4b z@vM6pi0c_|MeI?-x&Bg5kztg>!ip+V6^PMGJ706m-TuWoPJhWVHQ)X7r@v~O;kF`1 zE|cgFM7Pe(58N|5s~6>4cGJ@jq-O)s1XWkDJiHoB;F?yRvF|9WpLz(`;A5MYkxP{F z7sB>$nJs~cJ@MDBWrLHu4gQkVJnbOq&u&!gfbzHLW7E-0nJPIuc>(0xUmw=rr#r#g z{eD@5bF4-^fck0h7Q+DL^w&O2HmT#l${*usB}wZswK^y z_V3PAuJH+eyEaDhCK{Ctu2!(p{b;!j&$LKVZe23Fkkt?K-00V9zx{D`Viq?dnS|J2 zB*r*2d^$+#N0xeN)dYKcO?1w0@GQ~5SS#ICarrBTA1opX@ub=1+M>Q;i+S|Mu_e7YI+4!E~MHEkjxyCn?<0p4|uBxZ0E_{ z{>Imf7e1ZOb zVCM>c=i6BN^V{lI+zEV(dd|$b)tN?37M>8<>J_`?E%JQ44d;;!J0!(gT}0a zvw$RY09E%ZAjkskkQ)=*TUtC7_ZnrxLUre}h`Hl3hloLt+hXQH2~Fzmv40%*I0!7x z+)3>=%T?itH;TR16Wlfc^iM~V%%`O8F2zzE8Z)4@yBA#3%U1vt z3=%v)EpW59a|kww`xHjF+uqbmMkeyL1$24~y&wjox z9wj(|#n$Y>H!3mq35gY^CJg`mOB#W&Q1WCv{|eZ`>zLM-%X63;zs|(FFJLRBbuk+B z^1UQ@p|EL*p?p+Yme+>g^&p-R{TJBq;HYjCF=wu5>$rC*ie;%=$Vq5+m2I)UQWU_F zmzHU8fuS|4zlPKtXZX{V^GAaDW1-6(QSrQBxb?0^6V^YB~+R_7~17Ohs z8C%%SG^=^WXTyKxB91w;Tb)Hd>t*MAYEhJKmK4lE;)#E=A4qO)L9+b=H`)rjzvF&P z$LjJadUYQ9PIR9r?e;1*)VC;>2F& z&Ds04r6b-ZJxaSktK?$JF(#L?BwI@`BbjQEV4}FAO1a2aSJVOf-5NU`A3tx#>XP}% zb!QgJ;fdMY)k_eOVsTe=3QC6wmhfT|@B@Mf#!pbd~zO#gT?HGX8TCBixx& zmZr*dTbIB3-+aGz4<(%LA(Wzh;NWnHAv%8rL^O`MK&m*nPyB3v7aUR3+@`v2>WfD` zJwDLXn(k~L$SsZP)2T&9FK5kV{`Ges^gG!IShUPt=@}?} z;Z+~RyV-9Ib+!LEphW^Dpa}E;StJ(~7_IQjQf4@7hjx*~|K?sSYZjLi5Dbjj3EeHt zzSf@IcjTfu-vzmtm;OsG^qV5(SL=ehoU$Lxw~(@GOjSCR%q*HQ{8#S&E8~hQzUlhDh084(6z7&J$zN5}q_vzXCR&2Uo_!cx6WsPTOv;E}tg8%wLUq zMOw@>5KzZ@V_Ad`@CjW{DD{!C@l~s>^Vf$xmSSCnGxb$gwxhP+uO7g=9q|GC31V9_lG=Vy<-o;lWV_7 zC-=Jr6Uxu{1Zy)C?qmKdrlks%U#Li6X@rpWqb;Q0<~;3QB?dWTK28_mRNDixT@#J= z<9((nIGG}u;XK^DYZ)RObJfgNAN2+kN!VE~1dkD*mmtc_`Af~bVMcHqu}DZq;r!Jj zsdB%wEeI_O{nujYemB8R{GY`5o;J+BhqRphv>A7)OH1XHab$kd3t*9DMC!7tUWOS2 z?8N(r-{<7Ge(dHpuPqC3Y_ySAaDTXiZXTH*LqWYPp zz$4?X3wq5^M@}t8n@19jmw@)64aT!B{^1GVeTS0VfS#RU3P)s-+=iU0UVA(cgRTMd zEM-`tTey+4l(SomM}-`^3csk&_}qinOX@Y>FEwwU@%4zj&>Wq4SzgB?)DOF~>!sst z_F6KeNIF^uyB&@k%H|#Iggq})xU-N};5*>Z(N^ap+7j?UTpL@!s?`ZD`);Nec=O+f zV^xXW)E3ep8JKs0mbcWf;`QR%3X(N0s`u89y1#K;B*{n5vSn@Gke{eChtv~q9&;mJ zGMmU_-PS+L{$cN@1>jsf#(0o5^zzZqsh$QXl1R@v#}!r2#WH$3I0C>{rp*@C)r=L> z-?tKb*_haz15>m9*v=R(_H(Y_5PSu%a&{de9~l;UChCamJ0h*-Y$&Bd%2)|BvlJDUT<&Z+6zQMDtOIbuO&147a;;tClB-`l;M#^q5 z`c$qx+x-Y6oy(spXhWZt`1IpyAKd%VEf;RvyvQN6j9_{vrlIrtE}Jlpte|RwZ-4F; z07~rBf3td>P$h>0x6%ExG7nQYsf!^frrw09K6+H9PwSM}j@7x%(aF{A zUK50(8&IN_td!^~Gc?ieDeyG4%YR_%AfjfBxn`dTpb(=l*1JBzSDUNa@>`hc%au#| z3P_F{&Ap!V_UpMxtND=#psn(yn|62^-s#DwCk^vSWhki?{?%z!MdPmu#f;KhuYe#f z!7u6S`FC0+xQ=C~IrBYyn5*Q*pzXVH;bPudt#~U54<72P+F zd9&&SX%@kd$n^!r*jv}Ob1K=OZ)|IF%s+SbHVI&OjwR91NZQFfdA95P&lT=bCVyl%J?(uQ}z3 z^F@8yGx_zd|4He}#=E*v$D!tupf{h7saU7x#YjqpAvsp5xY4$Q^ z5Lt3`_WF;?nv~PcoL)j4yVR?wl@H+AP(cFq!$3ET?mz>r&HDpr!u(MhhKi;jGm&gy6_-8Nh^RS-Y~D$*ce+{=#eltgzh3A{cOEM-3ZWx#q` zLaO7=cMadJ@R^`hma`5^ujpHmoNV>{#tY`!1y{be7+Q6?TTFt>$^C-%CaKj_2gRKg zK^;|9mG0-EuEAWeX?#y}Lu1G(HSgynO~O?qjl`MIxQ01@j>&oBpGJhuYdbyyRavHD zCM#AFdEalh=hVYk?8h?94$Xz5V^J!&Ue0ni=Hf&SS5Z$LLrEH*V5!G!UhsDd`QG1u zMr3{Np%@kk(ngtfYU!x&o>)9N?ckrSU$awGqvz$c;t}^|zcKD@+B2HT_m^m#+YRqom!(YG zJh2oq7b27NPo!!sWF`&>YzXXCZL3fJ*N`=m5aVdv#<}mO{ehCMi2JmHOF-v<=KflE zbddZbHvxCql`!^kb*e2}q$VG>fb;R#?9u;dRmGm}~8aoYBc@$-DL zC%Pk0{b7mdm|LPm)K)IrpWV>jw#*ujJvXGoYM~Rx=oodXVRP zNh;sXE5g{AtU8#?mLgmc9R9VP*E~*UU*H4djF39je2Z18AM2?5g?L@+6?tHhtQ>=( 
z>OpfmMeeS2AE^jbx3e+o-4SU^ctB8+lusAwk>CJ++N2Fefo{&9X;!ANJ+~n^O0u~F z&mE_NxO<1SUUU$AqA{(%{ojQ)Zcwb+?68x01;54TnnsfXU#x8OYoj$g(+CesYJ6*! zV&#uzbo>>iSA5a2$R{cSK7zZJ%oo^f1=V7MB6I0mn7BCX)*{yF^|CWg zBV#jU;h_Co-sYy~`G)th@VuO*-3Sly=OFR+b5O*S`tPvG81Xgqq>NvPRKYo|ktDf6 zWNz9M>gdmD3}i<~Ey9?qX6X&x43y2i2??BE=@7>vTcxkE$egSPgJIbXJ> zUx=SP_8zHGO+$d?(>oCr@E-Rm`Fn0d@g1vVo#w`-CYh(E=9X^RZ`mkRS|(J;Lb>_q z)`_|!1trvKNUm~4yh=b|#${T&xx-E`gxEmZsT3zwtVcek#RMlfJ9YTL06O9H7ruSy z2{9lCxCz9lyltFU*IBpqQf$$uBD5JocI%STC3MSEFh^Jz_l@Y8_@uL1uezKy#QAeK zETtlJh?WW$TI~|G9L|w-Yjr%;uNS6YEeYOpAib{-5GO!wj45x+PAH0epQn%*Y#7mF zpOto3v_<8)dDsa4uOIE0@!HS2MKN=Gdx&x(ews!TjoiP-a@G|;bx*)bMJETC>{WU1 z1-UCq^u7WJ{W**55*pgqI;!kyDPulgF$zs}yTKn+vy z^056S-EK1jl#cjME|dFx9G|CfgGkI1{Bq}xMM=M?!!QjHBWcxI_=JN^jbuOR156iR zJg!`eYUTetC2r1?d0+BYZR}1+p5w=SNcXUzML(xwZ}S=7b?(+*G5roRT=AavhVm?E z9iaA9R(AUBV${I$vYflG4jK1aJ(I>?cw!RMFdl9|8Za~Dzfm_bxq`E)Df#g`ADy}{ z$*VPv0iJ8d^cg0pnzqdGUKo)sOG%4|7qZqq_Mf$-hZw}VX=)v!SMH1smp?c9W%yUd zb*Oxfe+lW#v9jGMi1RdW_(}`w_qrvrlQu7DFCNf80t)v4-eEhdAM(We`mkVV^gVLV z_HqoAYZyF|D5VzR5r`XvIqEmz&WimZIIT>fT~COAa$k@y+}9?v(&D^DAvjbLW-M>} zcyOt|QXbJP(duPMW%9xYnsMPM2rK+^J3YD^Xs^^rNo+U}x4d?x2h_vW>RBIHWr`7V zxM$$5~s#ATJ zJ(>=ATs6%@KiQ{t%)ec#ad_#qq3zdBt-|K2ccAi-@Cn&Iy8NK744lxO!4W>im*Q%r%zI0ezN4pe1 zW0K&|Fd@KHimJ&vT1F8ZAlu6UH7=_|0G*_1z$9v)1cvqC0OYqt6eUJFkNN4zB^-R`#5-zGMj#6 zOPiOrN(FpQfjWu_OLNEaGI;Pq!5#%@v-dRBanky=i53h6uxiy4)TWdeR+M(%q$8sO zN6DAGFGC&XVyPIb)uJK-+Qb3oOjtwH)uNsRm<45^s@gL0?+uEa(9*e7`WBou?n{1z z`s8DLHz^q`C~(I6Q>KODbBz`7-~@U5NShk0a||k=a7qtBes)KG(2V~;(Y(M+I5@N* zao$T0VkzPSOR=9k)NPb#$;zoMzXJa72GU^OHLo+g0{*t2y~O)=%^bfktbW2(k%k(c z#V|oz?kXU3J1YCjbDNCW*0*Px^+%GHd2jU$lU(j%>;2L%b9R!dep2!q7el#iXBS3R zk)X^oEmwZ?&UK^^_m+JC&*Z(AKx863D|!w5 zW%fgM7}*j?I#rB+=+28D!Z3)bc=(2HyI`GUcbSkQoh;cIM;%>#c8Q&ASW~xoEpV8bE*gJq}?5wr3YpbCL6U_KKCd8A@>!@W^f#> zN$XEQZb(O2?Mz`~dx$Z;gf!~&;bjCovCOfKIcY7YF~6nn#M-R7#?AVX6{$lEXq@1)N%ItH;8U@r6!U^7xtT9)^_!Lg6K(j&Z($A5#3Bm z%ToYRK(4<{JTdpjUT%Q0W0!XE6Y;rL^*rxGU_16Np0LPXr0yri3Z43ikB54dzJ3wm z15S0}sv-SfKe+mcikvFzZJZZsKPV)L*OFHHEo5l9s%Y%ZypY`Pi(?4Pu2=4IIlZqc zZg#Nb*8uJqK^;UFe|ro-kVgrSd%)=(wnlOe zl6izra#yy6t%$zS%7H}xx_VyragYN*~DkJWr#w@pBJNN zz<(t&My6}qVl&WZIWVLhMM=dgS}5v{f#8?xeC#WLdw)V4<`v3x?Sv*8c{TZ7Qg9>* z6B3!7Oue3g{DK=9^rfI6#h`011#F`rqqb3 zP4lVx777SsdF+y0~kVU(;hcQXI@q&vW?9~*$d0sz@hfJW}7PF z`qLy5iWr&d%T1DJb~ca{3CZ{_M$zr!`%icWocIc;@_G0ByPtNwzq5`7Z0=>Ejm6RJ z6#&e_-BDdi!2c(R=(1Ua8y=n9aCTpJMRc`NezJ4_g*~%& zp_a5n87!9V!O~)&Wj{@2jl=WmP@f+g?k(1+3)=kQ^y9d!S_xHaw(|%{+}vU3hx#fN zJG>66Xtr;)Tv#K%j+P9{lh$MzU%(je#oFf^O0O#Tn#QQIaAlyvqkgRF$Bc(no-?xN zv0=qokEO@%Khb1r{~C+P@^k_$e}#@>U4h_nHH(Jla&+w2cGdSgIg$I856&YZta3cmk@KJlK)fZ+w)ORC8UH$ekK*R!Dq5AFr~md4+Zc5y?ggUFw}Bb}pBRaB^! 
zBx>9E2??Czyy({Fha#!PcoB86>&JtHh8n4VuB3519!qC$zbEVbvo}{;wq`Hp8N4`m zq#{!`E;32Q$m?l7 z;5Quc%U!pF7V&-8;vZnFJx2&`bJ%Jaq}2THKP(9k9Ws{P1-d{%81WBLOThiB>U#fY zNed5bJxSUqe_0oL#56^5u>=%E(jf|8Sij3UeZ(_0o2y|)T)@$c(@BVdchc4X;4u&d znoJq9+YG|eJ)W&HBG2UgiA%23uq_38lt_KdlQar_NQ&?v@6S04{|*V2X=nv!m`PxV zHeUnWKRtWtMjW5HZ0FTGM`hnmU0jAj@*<=Sn|(*I;o=?dKjbF82P4Le!C*-E z+8<2Uslm%QLwR(EQ&Qj+w*6e6>A|F`MuElBB^Ub~Of9xfm1b_m>uka~1}o9X|;dOHhvr4DSy8Hut|KAnn== z5Jb0X9VJ+iZ@omTz#|xEoKO$I^bu!x-+cB_x4&rcCf_rif)&hN$gOWDwJ_ z=G$HWabRyG1T3Om0STT+<8El?+Gr{;h7j6Iz6A=D+$z^>K5B!1=HSyxl~3LuQ!~Jd zYq*L)*S8rh+^K0T7<18Olp&X)cp9EsT;*!f0o79$^JSpjZoL61oZtsN}kF>~8cRgoppq3N@<8Y~Z!tb{Spr6ZNg#)pJ@aX_TtCIwH~xBZ z#IN!)Jrs9sQyq-eESq*oi8(TFd3ji+69ifKKNAj!>g{>R7A9$Y=x(bOqwEyQ>s!#Y zBz&2iwFF?1_7&zrZ-2NL2zQ%vI&LRtmdgrUQ!k#|H~zhXu9~D13P0ORjXM`vCiu6` znlDgksarTB>$S4mn8uhVb$7DNt&PNs+z1QUHZe68tK;xq;7vhVha6zC+qqk<)`Ot1idF0d|jl zO*KEO@tq|G?8ilj-X*Wf^7!9|d`fu*cn5cjKoQWc^*5QBW4rFZ3N4I_4cHx-@8@i> zXg6QA$m^&rFIs>A+Idgr=d0`VcOGT>D7S0+6sd!t61{ICxHl=Mxn!^nt69O)zzrWzx z%e1#nai=zV(X4Nx{^dh}9~$r(S?|3KRLJ366J5#!hg7C{PlMXY>db|^_vorvq}x>2FZk18C(P4pwqej z+%!2AsFoMpoMyqNl!RDp6GE`;$Lq=}ipn$)^?H?Cc{iRG-%dskC-B28)h3jmzq&#y zPYaCsJx!&d^`o4_z0x|a_jbJ9{c%ru)LmuV)0Zq?sS~V2&z6AIgEE^I=V|$^&Z)JK z>SJ@k$fJXPr}$6y0tR>!d)@w6YAP`m&1`}$gX#mm1;>$T`J>F)%&CAI zI;nvIDSxZhk7x-Mlr>s1; zRFAuhF>f@=T%KyWt&ZJ?iaH~5m?}F-(TKvwJpRe=&94heibcP#G@x{9s#&e_+H2Rq za{=6>FKfthhi&W*ly-?<>QzVDO0z#!9TvvxjZEi1N^U!U{BRk=-X$9Ab?a;N`?uZX z+N!>?1Jd?~V}y}fR#GW3EgxxqW9%gLKwN9-WBjw`LM`BD zC_kq+=*_ihdH~(5|11q@$TsrpkYFSK>y|$Mvzd)8<**!xi*rEZPlKx)t<1~ z7mjF#yqRy)<$-|%6!BW!3mKhm>5ka4wSZi zASwzdYFawo&AFhAaSmA27TjDaj@s3VN!EV4M}t5r=2o-Rs*OQmy*10#4m zXix6U#Mfn0PoJ+I($!u=~628wNcNPhBSs+XH{t}*|ZUYNHf|c>dTD_`mWxSF9@#y ztue3u&2PvgCw z)618QEo;!r2W^*Ka(zyW-8YV20p}nQl6CL6Ic)o~471>Dxb!SRS@`NXgggW7f!*=J z{#=HOD?!TQ#eeE->6b!(%i(kS(6>XRi(QQMyl;D~)$eqf>(LY%onHYq;<=F@q&E8B zlb3=hN$LE2<%~HeyBVS})mGevRyN~6)I0Mf2qhfIN>sl<57F^!GML50p|1cq8gL!D z&~u4^9_hU&14B~rdXqqw{tsu3DxbZ+4eaX3t;dv~5LwlTNt5jb#UeZQRmr`+?(Ubf zFCBJwJC*!DR>FIgde{U{NSdy2x79;)WV#-ZQu#(ImRS2B9RzyAWrrhu)_!N^*W4R- zP2w*Tt?6bvC=-tb6nQ-bclgEFB9mR{e+jww*QwLyPfz_!BfNtr?zC6MR=x##80B%c zuHF-?6M`v8c3a*HdIc~lVzv6@IWpn!8CJ|DFUEZE)5MTkM%1NAQlZAKEl0O-=pH6@ zb{n*FSUq(=(%IzG@{!u=-j0D8zE{)u9@j#uv26cweSbu5Q7-IMV_OjZ^4-a>fis{B zG#UOkA!;_FAY_w&5iN+e7*Do+|1R3@KDkB3S5Tb2j5I7L&RPAyVS@0an5@< z{H==-#}y{8gn#@576r!ni-iz_-XGy9GbPI0OxJ!gYsdLLsCr_Y)A=5v4Lkjzb3$(a zGZ6fe+U~{)kClE|M>ZAR~J~eB-`Z53k6PBrG;>ejBwD3izbR%$jiqCxfiSFUo4Z?w!>nl zP;UL9_nOMNLTn?6lX0SyhJxSnmej>@C{NfJ-6l_k?b@%{8pdLX(U7t62wytGytKNc z)6N4r@y!`h^yi#P_RRA#rJ6IWnnf&vLcw5KvXZju3CXq^aE@t$ zt^k$ID0l1>hqZp6JO6Lz7j2qPalV@T)NU)MmCSha%5}A(Loy6*1Az@$Zcf%nU`whf z*g%3Z<HT~^Mz59;_vYO zGzoxly&|u7sqsMfV4<$ld^*)eep7GXH$CV}-T6(b7gTl&X-|cz@#mTr>4x_!pn(!| z$X7e)Kx8wO>5Ac8ZuL#ZriY6DQiKB2XB~RDY928>=0E{6gJDh4?eve4i%@ z%5qH$2CLtGWTT9^I*U4RJj1*(>~nFYZSGD0VwY}WI$rm!u?)*4&r~x`?+BGJqng$E z{j_X%cXPsr3+iHCw7#FG0yDW8)=fCDB-Qj2^^*i_C#0Dlesq5Y2rb}K^b*-*;7G+C zg?(^2Myb$pNoFHuP{?mTiT6Dywd&grG6(fC1_F_#aJxi5qOgtVS- zJcoVnyzt)SD!xOo0^f}g~5ypr!x~df9*U*g{TirKWrVFL`sS>w1baG&n2Nl%k z6B4W%XgLBDxIc?H5h?}gmxPQci?TOq^)kCdBNNagO& zJ0IpberuMiWgU6?2py56Cs~|eex`bOXTR|zSZqhMwjTROk4=_YVvI8R76*W|whu>v zH6eGu{{6n{Y%GBPyw#v#Mu@xeuJ)VSLqJ|yO{=q~g z#wDlaX(1`wo8uwy?z8GnJ}3ofbD~5CnGq5tw$id{NwEHN5qbYKcltsL%Z=WA=D#L2 zGbuqH`9Z!ew-yq=>x*{xzOUo+tdM5q>QQFaY35d4+)!sv#XbNe4<|crSD;5ZI526 zPI+W57Cs*uvVkYaHod3LPRlEoJp6R|a91nrWLvXauq$9YIGb)H`A$3HPFqzO^Dc_c zDPO*sle)c6NkmKPUjGW@b*_}hlIrAFbr7TVez;i2A{899OgX0mI~G3L7?9M6QQMy} zrrhIrv5`|&RF763!|L7A&7YxZ>WEc1XBk*Z;-w1m*q0K}&hr*jeiZGC$KZ%V>a;`N 
zau!&a_AUD7sVa$n(QR#C=NjR`%<`4u+}AB}c+6Q(B)$aLBe`WLH5aPzaNXK$6ckWs zejf#+=ih>n-An`HN@g^+`%U%q2Ut&K?YzlyH9;ORWlTv!Vx)CEsG`BuS{E2i1VX!o zX*co?`YIY`zxPtuNC$r@VK~^{crMR9w>M?%2=_&ko#x|HSmT~f+;Y?Mf1mhi+);!g z?e2OUI`%#;2LhKRa>5$p=Z&S_ji-#T*d;%9@KpR--~7+6{Y|_AyXgRHFpEr zc?FpbXF66yne4@^7I>d`-Fk&<%z!k(Nat%(C!dz_TDYi`YF?G`BwQP;I_p#D&N_$i z1J<1;u@6<}xdqk-z0cguWUZb<_^|FO@^DcYAFL>;H)%?0yJnBi2=(^Z7P3~Vc+b{r zulIh8-1JV;VrC1R>wn5~uCRF2kn<1#Hw5|w(;jdl^R`!MKkuj@ExK5dnYxdQi5ub9 z8u_c9_%as2YHk5?So$@?J@^*}c)2Oz$ zxC^A4;z~~@@XHyOUnV;!8lp!dQRkuGcSWq;Aq)RCze^#Ci>wnoLgJuNkO?!=CSvE z1AjYu@O>z<1SJ3a!5>qTh*pL!$Y_$_Eqpn7SIAz`*INf~l-hzOF@s ztIw8?ac{EfhK(18qsaiY?YSk+1Lu??V@#Wh=A7M3f8-Lq?&2O6Zrzl$ zL@0syw)xIp{@Hx1pnc>qjJmw&@4#l3j@wMx9v=vQdarXb15d~EKL1#F_!@VgXL%p| zEp;&oWr$Yzul*h;A-~`NeL*XRWDhLa>o{O=%TP>o3-fR5FVmf9H&1@5R{%>$^O&|l zP3LT~V&tmKz2@qo!u8&}W+3uiuEf@sDOb1ze9n&~pyyZkMYxzi?pW8P&YvdUzh17x zKe)t+d?zot1cm=|N$%5#BlDF)+i+zGhfmUEXkS}$8c38aveQ${A2H{h^t+{{- zod+BZW>P7~l6@2#SXnV$sV^aAoDyL^v=Wy7${L@vQ#f=I#3rhDG4^Gvx`WeO%f4ZS zaYG`vnbK$FnQ`A+FZEEhva#;|aGXTZXXHo9+QjSp`70m>ujqx=JKAQ`Sn4&-9`Ln8Bg`mMo5) zoC<_J$#rZ_Y@Bmuc411RuNOmX-nUhk<@hd>&#w9DJ6puK#2wo$Mf4QennLCKK?q<2oas zWPfZ=Oua)|Yb;5cn$ zvR^}>Z}W}~!oC6gOlBJ;ikBnD8;6a~Q^{))lEMQR{*NC+mbn{TE!KkMJ@)Rks}?cz zC3ELT<#eklawV}(2j)~Zyvc8YBnwtV5of0K1BZt17f^VbYH5aK>D62(Kzd*b`9xnb31Wot)7G0S1c;j&g{|CBGtdK>&`2Y?^XayPyP zK}VmDn}sLV$lpBU&9P9B2?;1NwtlNRc~2Pe$UiR5%zr%0NuIrdku{O$rz_?dyrF}Y z1HB8$MrxCqXU$rBQho}pSqN^JP~#p+xMIoWnUR4}$-=0Wz~sFvRabms@Lq}D1CDMD zuNIGQ)IpuJg9rH;bxrn2VKrJ~kN$5G5c1?ggw4&UYfSe~8}Ptb7s&U9l#9}q<6u6i zn<;@U7r7zvPuBjY7;I62L-0sFIH%$e4_U%1Cw7!hQx*+nN1Ko_aa?B52vXZYV1w zn9$xKxwIZ43dBuLW-49d-bnf#{o)2;+W{^3nmBt)^vz#pAR)khS^ClDa8DNUiJ? zEwglNz+lP>m##czjlYUugHQ)ynkv0*W`=5<0x>x3R^9V&HraUt#(Ny1;>1(K}r}VffCEeDJl;2vx zk)OK+BtR^9B@f3=@+C?Nv&7A?kEhY;_XH!$4KKX?M~y9AX_CZi@}JeTiV3_i+@;mY zJV(`sN-`wbb?@M=yABNhnfVFR>2@hJp7Gxgjbr{oc_7i!&~AzI?-`zT`nU56u*Gn_ zl|1JLMFdX5+g(D<$P}DAE|edsaE+_{MTKVcRw}78w5xex$;XI(n1dJ&*PTCkv6X4| zx$dT?KYkEDAk@(Yo4OVdW=X7u^wz*hSb-f~c2Ics6q)|!p!HeDCDrvdfOg`Jm(bCer4 z-&Yr6(O;?fJOfh$YO6jyY-if2IHY0O1c+FEj~~U|GI~FiF5NQaeof9?8<7$hHv9@8 z$-E`~NUu}v+csU7e4lq5SCFGpBUv6lM%E|_Hd;_dL%miTEC*aVV|9JY>*vRJWn z?9RVEM&3^XKa!jS^NRd_fQpWY9Kls`*z#v}9WK*CWdy1ZIJ47nQCocL(*MY4rcWta zbTef2!|$`*5t756F;ttI;AQ_Zc}64|1q8oqH||^4e-h+%DYfQUB3*U4ur5(uz}+P( z8;CC)5P`+XVm_fs#7}A~kPf@l%q%h2H@YlJGAI9mzGuRgqzU*|u>hm;(_RpL$IZ_o z#Od)z$%~Z76@M4wLXJ01+JGeLZlim-y}KRn4}l|LItq1S!Qf;FKd3P9-9|^&{{x(W zV}IgZV?(%z#@F#I@#Vz0Xo(EF!9AD}nw0G`EL|>YEcL&MQ3!l7sm!NqeVShQ>bS4X z&jd==-Z9XmQ-Uw;hvy=X>tCgx5vZM<6&WAe7c0;Gb$Z3PP$07aapeQ~l76+n@eblMM!1 zwLjWm0+#$p&2h75CpFUDTE}h-VnF12e_H3}UTbR@$@e;K8X2V19nx1-PIgBQa(&q2 z`PUb451Pd1uX^E78GD_C5y2q2gNtu5n73SA#wq_Uuo`R(!r1V9I-cxD*&U?rG zY6p-;7!IsQHDP0w?F5@2Xg_;*AC*$L2hQ4iR#Vv8joj6Q<;G7+lnyBq0e+PfkGG2E z-Prb$j3aT9zi(RUJOqE@yztN;^cAxI0MY4Olb)u!zlWf=j{G*qr}nL%{*OxNim~O^ zo0ZY@kBQ^6ywkNU5glN-n(2c`#E=Ac1YtZnV?=dv<=q3_vcknx5yHbDFbHRd-{33dCaN8R&EuW^OI;=XpJFLq8kZ~7mn zVlkLnG~k^cXK3#9+5TUtcSIs9nWIzpU4Z=R{{V=_dI;9E^3t}+w(pJx0_t>ezh`L z+)HD9XCaN6GlP}t2K0a-js`a!(7F2U%Kl0z-yVDr`gW}k zh+s{tcnZis;JScDeaJtheD2xC2*DNE1x~yas=`%td2Xj?#o(io=&B2FI}z;eR<)l)tb>;zANL~ zQJcuey?qOpv%lc(aWHr{SMe0G>UMWAH1Mcmu67N(usJ+uC#6@PR`C_al`n;@ zFWD?z@hoeCxk7kW#^9v#Kp-DV?DSn*Ptbfjt51H|lH%871G$jD0Dc0vjS5IoGVVvp z3mpD+^f)@zX6McDtrnlw$H8N8^kAu|-$OTs#*hW;n`S?5D7 zr}pKPYjC6GEGI*>`X6rf>PmEKxYMg{ZlCZE4m7b;6su6bD$)G=o?WTPsjOe_&-(eX zk6p(Vt>H_P{{RUVmJd(0zz^dftZh(dTfG?0ZYBQ!(2ZK~!aTkp)0IvMxr?7nHgouO zt?_YAjPKc=T?sWf+qvjgm&WSmNay*MLA3jVD=$mZ(mU~JTBlD+9KG?;I9wf4t%*92ldY#Ct}DXi`B<-|df&s%K`b;)PDy!JV4mCzpUhNAXjERdI_)Mkp62r3 
zhL&rD+rC2@<)Pen5IGs`-nytKj`KuS8$^(wEyj5)KZtt~URiT}Aezx(C1kj`mPRYz zfUEqE9jmqQgiUecdrN&$#BkhBJj8#wz&lRk$2lUtV>-jB(3MWG*Y4cR@8)&Hs7=SiJsZ(SFdf_)>OKT9g=cE7#~sXQhZO*pFq>Cf-?!Gl$jk*%E8pO z2e9e*S4pT`&3WP7Docn|hp~~=F~I^TO9T0g;=V;!Ql(0)HH@ylx^bp2YXYKU(m87gKE?P@6-!yjbQ% zNg~UL@>~KiN$Z1-GhIHBJ;P+5w2{xYE1c6OWAT^4rcw(V$a)SiWH_ro7_OXJY444Z zo8?oy^Tto*#aBtm_}$=z3m=v)t%5vQD#~O&o|WwI-8Q3zO?032_?`|Xr|#yx5C7Na z{84w|+b@ZJCA#qre4n>Y0;tOt4sdWeAXlNr_cswif23Wj*S{_Hcj&zjp!_opoxHKf6h;S7z$oEHImX_D@UJ)c z$!X+jHhP3ZY)@|_c^#N!{{UKLcQE~jr2%8h&tN>P>M?PR;BbI&ddi-r1%`Xp+w zXu3qYrP)4Rw0p?uyBNUwl5t)bBDTB4(70j8PipMGD%>c4l{>v!|?R12v5u~GIQRv z?v%#hG(sLaDbMw)Qq;~%Ms|J%mD5^-V7cU#Ce(u_2<)sdewFVUn%ks!xxKfZP%sku z%-h`l5~fCd^ItyrG7GDnW(Bpk7f+w{eqe}^?8J;8#=ZXl#BXs0oN!%7soA*UV!b|m ze?^ez`PRJpnJD|L@edQYxYk+=ai1zQL*Fg8EKlNV-n>2V_A}vFW!9L)rFQ`Q1z%o! zn)6SGnk}xYu3h+}UHLR?2WcdC=%ZC7u5EN{FWyYu6(U}x zvGm|oRH>zr6d;r4eNF8P;ogru#1mMvGF&-8*x{9Vk%t_G1Fxs5mAsEk(jQcf1Wj~;3n-5sRl4v#h0o02hovro8|iizTD{c4+sh|! z;p<*(=G#5WaYJ@1ux_KlR1y~~O8661bxQQk3R=YCZY4SFKsD1C4itV2 zwW@j?vP+mCypHoon$4o}C5g8bJ$(=5#bIgatEpL@gz7LK$cpN22CnkO{iM)de5|W? z4&2DN=;eBm`2A}F%oB5D*z2gp&(jszN^Pxr9xfu9r&+c3tp5P7xcdu0g=6tHvwa}l z8u*U=3gCR$-cLVH)!BHPSc6RPuDN{9w#f2HDmgd^$o~K!$JZS?*BPV8WY;yTeC<{7 zE~Lcg)G)6f`1h|lhfMImm7}v$HQm4mmnF!JPp&Xe<6RS)g%u+ON>#Bw#cc0?O)hv1 z#-J3*EY~pIO60N27j4bS$>>kxSrFe@O)O(jAjJ{gj^qw@f$nO$+Ae%{L zmbEvBZeUAY8qw{_{{YKbWG>Eq_yKe1E5m2onKl}9*P{DTwOm`+Y7KaF~qfpr^Q zTVJ%eLJRo?LH_R;C-K3l#(wILJbI`4qn|0P@ipsHnzD@2^<1%kt3HmfxJZItKNu3t zpJDPZ=5t-Ah3?&xMg;S78#y2M(XS=(9;qIm;q5~D4h&&nP&;h{{{UttyWbC7m?7}% zgOLuOD<7yt{{R~Js!{i%{Li+;S;hSS048_`#2ei=TJatC+TSwi*NlVIB%nx#j&K-| z59ip|%bKYX>UStP+be%M>iko$O?TpLUhFp3k@APQ+Sy;O2Omn}H3>}DO*!Mu`M)~* zdNFZ|O!+$2Z$nPdAqyF-b$Gn6tN02N52(gNME<=ki{R&1F1On zHLQ#w4yVw57DczTN$(qU@&E}vhhRMixAmz!EvzVy#Ws%EA7hb#_fTtXKw{$^b}l`8R#%9nSgv8U zTotu~IKLs2{OZ->LwB!fuQ&~EzcibD2t)mAp4LRNK)TmhZzq;D_A!x>`4DSpG9?=n z?5`xWxm$=QSmaZYoR8^V^Wd)#-|5~e(Y5=+(Rov%Oc>*QQ-17~Ja9K|AdcCunwog{ z*sL4ZIS25?b=LaP*8c#}brndi)DJG;e5^>|0z03lKE{;0hb_}g{T=v(cX(q{!yGNV znfwKQXy__e#Cl2SMg6D#j!OH*d`CXN@N#WZ>zj|UG;@*3^AHqc{{R9jpT@o!(;_kW zlSvrk?e@R=IV-ZJut(0t^43xQw=n)8znSBY61d|Uwb=gv$kJ6?$vL-%H5-~JvfW6+ zjw*j2Baa&Rjo5J7t>Fj$+dX97J9+8}`+-I}8~`hV6)c(hjvnd_zc19kddix7P^;&4 z;A8KB{{Yvne^`_ojyM7g55}$CO2*#O1JUI&ez^XIv-O23GlTpwp!~%XsdOxCuL(8f z`CP<+V&*LW0C{WZ?KWMKK!YXA3<2M;ubYtKRBu8n=s50L2(B-Q+x9y{Dxdb>3dG}q*imO*_uQlqRCX8~{${?B@nPSn#xaD2LHtR{{VU{K*`=RXx|(0S(y>3CXqKi` z8MdvuK6yB-xmaecM<`hLA52zDIW1b)%IANfc~j|k1ntZ#k`8h~<2V@pb;Vs<%G2Rg z<&Pb+T|5%Ku8#%85U7q!p}5Hmvtl$dl(LbB!F_m@b>RpDXPJAxVeBU5y4(7IgY2L2WZadXF^YP zA%`a&kGQOyQ}~ZxE{t5HMNh2Fh4N7Gomr=Ct2ad*>aV(OD z-TYZ3)+s9;6(pQlLC+Q1d_MmGk>|rIJ8HIn`aLU*DBD3b+x$IW_^rG$eRXS3{T`J^ znH1kM@;xu&6~YK^AWjeVyqnKv!@o7=Hf~<+oVM)B-2OG_9y^C@(u{E?uXB=5_Zj~H z#=O@^V$Kh%6Zx9*>c-UB^_i64wWgnC`7iT2V(OZ#`00fI0MJEZ>+|mzt8>U#Q=k3= z{{Wzhz}2KIX<_5=za(&CDtnS%rTaTnT{BF)sKBSYSGL;6C8<_UmEn3zFx0K~{{VbfxeLWa z8;&q5&BiTJ^toGHjh><4_*+-ig{}$AEax3?yw?$^EEcxfjm_ktA&unSxbP27oyqB3 z_M7oY_+M2wx>O?OWsTw+ju+<`&%P_p?=Q7)5Zt_%k{yobPc3*rPkeg&*Mlqs=U1Bc zN7P3+g&J-P-hDsd{{UZcpQ+u?txC8BE#VttQP&wKC*0<=Eo`TpBvB~`x$G;W(=>!o z3$IG_J#Nb1!yXj5idZB2ZR)`)$=xDsFayw>jn(T;v}r*?3()!6SUQ-A%A}r&UHUKU zL$2@zy_bcen*Qlyxti+Kp<)3j9e;;C3Fu8&@Q$}{sOlPim#{j;uiMTE?VFgkrQry|c3YS}{3}YA|f2;(499AF08{3IIU1bLwMz?97U-Ua&`{ET-v8;QD*W3GO4T7D5&qSo!_xjRx8V#m`6{{YlhUEn9g zcL7FE+ZzL?{IoUD=@yQZCCzTuYVgQ?B^HNACqAt%~C7O(uz@~Y2J4p zV}OV~$mISNbx|PDWZp2-9sdBI09QS%N@l-`#eVEK2kLN7_*Y!;HSCZ@xAzfp$FU%A z1#{u)+C1-fulXIAnw-<*TSrgxIG+zsEr*Eh=Gs9=2cJszuMg;v$MG^dh`p0H05qy0I~GqSEYgP--<3 z^FERARN4xuZFthcGfc5 zJ8hW5D-}8#n#s~{ 
zzxf_z6LHO5LZorlvm$!32m^zP_nJIZ%JXEgW9!Y-%tCS@2!Ylj4m$ zo{ciE=4-Kh_T4jd^E_FD@t5?$&n5^Wj0;g z;2}nh6!%xR%?`b5^RGky(DMHPiaJ!cem&Q<8wT3zsof;ejt&H*Z(mc6qM(cWJ{zb+ zHqj%;xBgkF1dMg#n(lm64T^Zf#1c9enoU;obb{1jeEC@G%Z_JmN&MDJ(4g*mKG95r*3fN zc|()@KxVvuQPO3yki|M|Do8lwXYj8Q5v3k)V=Kn{{+}c6>EkP6Y14R>%TtwIezx=b zjLUnOVsG6_V?ST(T~?{DYnslN4wI_LvMeqhIapz_f-p$xJ?oBiR3v93v8!<2JaL#@ zQb{!zxK9sGa*V1)Hnx4e4C{y<2{t}HqM!z>OjNlU z&H#{t0RZ8!PX`_G(xaQXzb|4q+^Yr^uOiKJD*0o1VaD8?~;PMvrElM;$$V!OJE!~%hv;s{RmgDJ>(ZM zL2Gw)J6mK4M7>o%iw7rs4+P`a9cw$qdda+i z%D7W83IOAS$*NMjF_d{?Zup19cOEUev+!qwEc}}Y+(&N&W0zyb% z;GGr3uv(Emt#-e5Syzz~p6G{2Uqj%197SR?;yqa~(vG&UIN?qEvYlmC8JWsD1`;)j!Cgv=2 zh0S31JJWo=rwguHUTU`%*0Cui6t+A3$AeH?>7F3a7k$>8CANMTOCI$4f;t~@U6dN; zm1pDWj-ZP+n|TbD&V*+GebNW0AY!UZd%o@M{5y4b4XWpGmP3FCUza!-trNb8}H-FUk4Ug~eBMgIT{ePyKG z%l`lgw!LVJyFMYm!1X1VSBU&Yb8|76P89j!23do)tbX~tRZaX(m&bnPk!usZuW;G{;XR#-5 z+HKrT2tJI-_YboGYm=ViarusYo1fOansp&Hcsrx$WSm{3hOO1efvlQF@)?5|_Z@|Y z0FXE&b~V}fQ^d0Pa`r3Rc>K>S5wx~CjCH}T9yhmcr(yQwRRpVbkQXhQcXRF__b2Y^S=q3={P~TG4xd1$ez=y@O7-xw!zG!if#g{{U(^ z6KIf+?y%;(D@pi)4c?xbe!p(FHnI+3lbj54<3DkSQHr5;rCn(H^{w8GJguco^F=go zkN67_(TD3;x>Am*>TaDG=_HM>9(+94EPly7p;lJ3kDaLq^FC9>((3IDM8lG?d09MkL|pU!?rQptM^Nyszl0Xi zK(@S=_w6|)n8=EoF^&cl9Os|HpB?qO#))@-8aTjpWY0Y3kb4o2aC2IxS+ul|7S}A^ z>YRR!U8+VrsqS(I!dk>iuO*`h`0hk^*|DizgU z^n_y4^PR84{--?sYa3A488+K_fMhT<_Fi?y4F+i!27%?Kx7i$XBY* z;d{tDM{lTJS_yQ9c2%ANC4_B266x+g9VtSjW|W}rkD1J= zdptDdf3$?7=@ae0js*q+VUA;tVFh2$)*OEt%H8ayAHSOLxx3V^t zb6Nm0%o`r+rn;cDJnR)0Ji8e*{vJ=wp1p5pYk%-FL{I=|H6hQY_A1BqsB{~9NW4k1 z(TR@QAfTSRo0j}dL*TpkFa8y38eR$;8;DpEdbwcSeKB1B0ERB+I-i*?;Lc0sxDEl^ zkb8`baahq!T|Vsb=OuL4WOkk?lkAdRdB>2ul=~jl@@Ak4Urhf1-z3-2zCG4%Z~p*w z1_H#OGZTVOarjrywSZ<_Z~8t4I6ur*cH^49Bi|4S$-!gv6-|(X&23|! znCUbvLVFaQi>S1eY?&NoTzUcP>5fHnO!#tBfu25C6!`i8SDT&S2(2G z$YLh}zEV96dIp(gX*Y#ux-x+@8~#J?IIN|0vGkb?T}tz&pV@EjvBTY4i*Vp{5@3En z)J}onke)a-(HLx|uxZ*%%l5TXAAN)ak?U4(?KGJ5`@)ThP!k?cCoPO)*mGR>vF%F( z&Cev&t$qDY7GIVahB-gsU4O&Ef8jIm&VT4FY5xGD(y=wGdq^X&YiW1yg-iQ32Nl@- zFtUnY8hBngn4^DbK_(BNk~Uw?sPLtSm$#J^^|JjAkK#n(FB}e{{{8;|@$xIq^hM=9 zweCM3v}CsNq*`8&36==J4Axr5=0?BWJ?Y|s zf@#@amFpf7V^0S(4tmqa&5*q91fQVmN~BXN=9Z0tsYFjfU8jgI96~MNJjwTup#K2t z*9m6KCl3%L#tSw(DC&O-=+@#pj5D0q6=>c`o~{zLskpAB8hwFg1-sXKVPPC&IOecy zrh!OYjMt$ZHaI*jb>b;*Qa>g^3PILJBobKv0CW!CwZ~R=-1RWjxwasM?c-U?^3cR& z0D6KdKZ$yS&!odFY)ZAIp8o**K5m{IrZpH2ylI}fn&66K-l-xm%bM-wd)r~s_EwMbCe|2>W31EL=B4u;z zaDA&LE7G&?cEW?wy~s`zig0&cN1oe@eA}G-&x@BQ`^LS}!(4)WBGT2QRF+V?q$eR& z4^lgA!DH+_>%m_SBe3pCtwD0eC5f9Ghd)pLzw)k$tqfx;JDo%Lws+L^E8BKxzB8&Rfg+p0)Jm{{TghXOQ{*D=hrwwv6o= zKT5G@76u_0|5)wPEHE&Y+wOs^|5^aM@*UatP~Q+3;%9Ow_z-96Gav zwTj+Xa!xtM7oTElKjQxYgY3LJ;;E(4rAc)Am01&OWT_(ucOS$?N7lSA?Knp)v*V%3(mM196MqfYaLOp z3lYgD8Eo|b0D^grts5oWp@s-JKT%Hb(>MGi79o21qZ#(DhsE~RtFG!0*dH<#xs_ro z&gBk5pxTVUAE^LUD#}M*uSobz`1qyZq#5H>w*6%l(EL#FXM;373foxl{-O4X?xtLzB#o}6v&a>5 z!JZ_X()nh}B2VRL)GBJy?QzcFl6Onw|Bvf#w<=5X z{7;m|WtA&nC;P2b^t0agceitAK=B36i{q^@K*U?>vb0xlo>k*u6lHp3bsmPM(Qc&B zJ}bkgSlqi>z0Bjw+)9|tY(lW;fBMzVL9ecvDr(x5q;|h$Qf<;jK6A;~;QYZs3uZvH+4;KQlDJZqmNR$w-Mb;BT*~iKy!hN_N`0UEiZf}si%tJh4ni|MzGW& zRY@h>2{Fo44fw_~Mn*?Iyk(ZVquY4m_Tyc#Tg^7&_ERF{t13H_xk&T}Aapg`c(&4G zCB>DdX10+)u`x%qHvD6S9JfGE<4MMpl`1K6PRd-{T`M-P)%Q-?JL+~imFZ4ZWclHH za_-Z5C*-{K?V;j}X4g|kZ*IAkW(cF)*SYvtQMd7jhjqV(5$#y*;};rRbS2;zC+h9V zJ;M{)ythw#%^uF@#8X>|&`8#nuIhFdV0XA98QP%Z+>c79q2d-I8l;yc6+ZUr}gqH*lG8kA@MD?lNavp zZ|)W&yZBv>8$Uyu?!0fR#i46gJ|6I!HI2TS!Zw;SwrQjT5q;1h zUacwBR)dSt{{S`CkIidun5RS8)2SFgc|H8Q{LL*t$6BJKaO+YHLKj>lw*!2zxZ^7! 
z`LKA%B#P^N0ixM>n^d{I)rtPi(clv}AIAB}U&v(p40p|Pz7N%O{W4dV#d=kx#+qYw zUpCwaGLiCZILYVlcE?)jJTI*q4JvkF95(u(kz_F_RZJ2Wf#?YotGPlIVb&dH-m%EIPIR9 zuG_*^mXaNQTd@o3h66RK++fCLa@i~o2X6ppfKE@h4QW1~CaAFKdzRc89x%r{1CT(; z$^)!erwGfecSw0wks{L)EJ>Vf@VmMu|bq0+Ze1_wO{QY5NS6*U`;Yu z=ZnvQ6U#v%lPtkcK*Z-AsWls~5ovd0`!0K{W(V#maNmeEx2X8C>%*Z z5i=DFAx+rBcjt}`W}{M`BAlEe`d%*GH@8*u(CUn-)Wc3HQeAB=dhLFU?R-b6UfKBP z#n$?8W}WqWmI}XrXWARg+3rWDIL$vzY(VyN6=#X^=R1jJB#i$6Bk5Q^ExC^4;?|`L zs>!!rxKaqu$}U?u{3^b!tD*6Br!JiV@)~2cV^g;S&;ee>I?A3qmF(kxOEi-F$i`v) zt`)C4efMXn={J^kUL%bofg0CTZ?oLQRBe%5W9A(R9Y$-7@OG2`00`g2h~m=kBzpvV znA+WmD#_%#0gi_R1Jku;$E;0hbN#1wo=mXqmq&6}Ry{mG#?X zMw7`a6aN5ABpE-60=#_6nw!N}qwxDSwCg2%HQlAB;m~*oR+MN_oVDk_{kx^tmCrBn z4~cwNWv^bU>Nk+Z_KAF@^Pd4XoB_{3IIgx0S55H`!~II{U4}UIiy4(|Z(&7^<_86m zU7!cR{w!ya@0>`rzYVXBByST~t@fEL@+`cSXv4WXl#aV{4i5&R@s^*e!>ulfYCP>d zn>Iq;AsRZjL$!F_{o;PTD;dV0H99d|lG0q;T`OH*ubO=w&sLkCEUHUYs<*D+@D6`R z)HM{kOFILTbip?E&#&Ruz2n1PARadG-m~C{lq=YQb*1{Sgl!}0$FHvZ_OBAt7F`EU zn_rgO&KC^UmaO1}IS0&eN22_r-0{+;(R^6i7N>8i+1W!ALckLlWo?ItQO|LnwWeW8 zrX~~my7!~A{_=PK0D%5w;i@WfkFVY{`py97!orhIeuE5>HNeu5(z`t^76P+fNp0BYm8*t>9JR zcR%{TJu{u&*~e@S%hx7Oh>yCQ{EVYne6cEh*nSyzOWI?%&H&rMd^?{74(e>)BpS2SHm zzx1#3Ey?j0RhIFkw0F0On=2e_Q4xCuRU@G8c{S+%8`TGo^nGhaymJ<-Wo|Ak??x6! z8N){@A9+c@9maa`&(GI7R;Q;+<=k7i8Tni(D8sQ<91wl0wD9%y&xLF?9a7{NE-xWl zdwARBb{oU9ZR!Bpyq>(*Rx*_}hQ1ccdUCbi>+fCsw=knpPJh1Jul4yIH^c8AT=-%O z@qC8<#^N~ATX_!f6vzV*7~?w$6z>%HcxkG-_lD3ir57(1Q&@JLIt<9v+?6`rzDtYUJ#(&RRy0j}Q z)iD@HcD^X|lCt}xx_8-9(!p=+ToRx?BVuE@!%53PXvtg?kifjdVf>)ihPbwcDB5m*U9Yt zNvBdyzaw19dHGvU(CY-c_Lbm!%X?FGb*U)d<`L$roPty-$SaO9j8}-msK=@J*F?FB z$#mBHU6_^yQi`$(*Kt)n0qO;G+Kuhrn{RVIlO*s=6-;WO!>bdHMk(qZLms0T) z-27R((`JKwE3QthxDBON;j^APVzRX_5D9!stBY${3?}9Lm4!~$T=9UTisO|@!xJd? zjsE}}FVy`H0hnRqS~B*!SuIy&Z}=NlmrL>Wu8?y2ezvklcW(Tn7YAu8(<|83Z-=r^ z;r{@Nl4$n#4>g!u#c0VTSwb@|SEfb>HJcW?x-^ZfU)#rPaTIKpu@U>?%rU)z%n9|Um<;_%0=(Q*Whi26LvBlY$zN6Gm6!Z6<7PD|y6IDu z(~^FROa5Pp#_B#S@ibbs(!;9WBBai!49;)^91c%v_5Ev9()Hg0O>wKniK|&$1-H40 zLK%h=a>xb)5DEFQo}TsQz8uv2HE-hiWASF5ms)kr#ER@mSBrZbILBSShk`3p#x^>* z)&BspV;*&|fAld7dn0JiJGQSG1MeQ)O?nj+J+&%PT#Z^++Uahu*UdhT=g;BiC5EoI zzMq+~;SFBt?WNGIn*wlLk9>}Q0nK{eg{{AdygjM-au60xF}~HODC%NCw{O&(gX&mU z&l*L|$A`4n)}*)dKeTsR+q8g$D7?zUr(P6&Po;GlKZ<10H3%=FwvHf;hG@3~v5(9; z`hK{rvlvyxLQf98=)1+=?I(Bg($D5_VO(jrs=Gb?AN6w5#XpNYRj24y&*dFLMMr<` zkK=E^R&RJ2Rn^e}|@V~?>ZxZS1B(pMo zpatoPGv+A#@;!*IcVB{Ye|4!@A-J-;V=Qu_?q8ddt&W5q#*}OOdi3W}dyY3r{{S`h zPt7mXa)%@(7{7TveLu;T$BeHpwE1-HagzMBOt9Qe15XnWMg*UDsp*1FD@Wn)hqSF_ zwOxB&22C?UxAWd0apy=wG9f?R!W{ksy;tylwWaCN+{xnYBF<}BA}elWw^1RNv0gGj z{w(qM)^~)wOt8hP=$DL^@M}?Q+=>+vKp9scbObs4g(~h*rCu0VTWwa_y?K1nz5M*e zPASfut4jO-0KmV@-m%wpRMKv>rMPQ#n6z-)+8-)7dykb!zyqP`dz$F{AuN78@YbwoL= zI*$hU_FoIdsoU#N{{U!d*J2pwfL7&9fVtgZa+5JUm5=Kz{XEuis^g@ZELIPJ}1B11iJhwANG3O?I@+#?{Kg3mc~Mp zz&Q1;D)`vXdljCVOIyZ_BiwU^XJ!LE@C|P1ejl~F(o*wGxz3{mlB|}<-dwUpo*52C zaolyqXI2Se`J(wQ_iN3&HQDU_Pg=?9o$qoD-xlm-S8p^aSz+bC0O_t4q5FO}V>)l>NcKAo>+G=vE#m zm&AT3@M`MNM!K{brJ$NsP^`nxOa{k(0m0~VR~XKncsB^U-Ci!0m#d_|K`9x2o>?Ma^EOMw%4e8?@*M=k*CjoWu)s2-K!_7-nzbv?byERw`qb1QBH z^(Qsy`oD=sh<+y6*pmo6f3k5r%IZ}U5=s5k z)xU-l+~~g$?=+>FNvzV|=?q1QRtbWq8OD9bHOhE-rtsi>M(}x8zEekU3H#u<+Fgh6 z;B*I^*5`n3V$-}_bt|)wmUj&7dS$kOlkRKE#zsy!sutM*G%X~nl!!{d{^4*s$Klk=tWFT>N&@4zI$K%j^9zc`((*zjuoB& ztl9gkjFNtU6Hr*{iSZ-h=Bwd|(Ngcj@*B-MFgg1=f;JCcGCrkij(tjd4IW);>f6lz z(u~6gng~-897w~b-XtjY&0zRj#)re&&Ay`rlve4uB+nM$#wH_p>x0uH+*XzF^=eU* z!>@WRIJ@;vRp|Z~^EqXRkF;LXXTPKVu2Aq-hu_8CC)4~svUy8)CM1x51Y?(PzzW&% zH;G!$Te{QyCR<6JWQ9i>%Hf;oW> ztN!jzJ+WM`h;+M{bt$zgIL*zK%!XK|Mc#xl&JHtzGtk#;DysG4DX($H>iyf*KQyiQ 
zoK8HDX+L>AeL8%Zp#C(y)b!h(H(%L(CzRi3yO}|_jN|V4pLvys7-Df-e-3;(rRo-% z$BMi_hHXDXgL=W73FjcPqaW^#ypH6K*`@HFv!rRZw~gZ68p>Fui*2Ej-9)BfdX6$z z8Oc0n-m?5D;#C@cvEjSNOK5d;1sDLRvVcGY4uIr-Rh4dN)2$3N8r5pH)%)DOXoS$YA~p&yDg4*c&l7XoeJjm;#ozdin_*F z{M%)a0!Ko2fHHaFy(dDO`)|V<{LsC;`qUyHuq69Rrt_C|SNV65xGNFC1J=CG7?zKA=`#0U$D$5?=0p|l8aa;sCmx%Oxg*I~CM%%bRT#@xtUFM78r|}QO z4+ER~bxlScQqWBz0HwE1fNsZMTppC^)15p_8-!i%uNQAUS5>C>)8<60@KbMf6wm>(|m)+05wo5)@eDVlY_X00nc@p%sg$ zqU_vne^&nh$ocvcwK&PGFK@v0KZc_7O7TVKf%Vw@?JrY;Rh|h3V%gFA9G^b51 zQh#!N8Rc+zuf++zXVxzAqm9!!&*n~ZU8c2nqWFj5)Ef4oYZb4EV7O?6@FK)6qZ@K$ zMFC3;{onz}Irhi!J?Dk3_3@C4= z(3bw{#5?Odk`|Z{Jc!8NSEt@$27Lkbtq%zJ)Odeex4n+eD`;dQDcUf=n3#>+WP^+Y zjCQEU!mVmBMwGcjqT9abb8 zoR548)Ytq`JQ}^tw}sg0I(4*-6fxTq8#1BB=^b#+#~CD_rDV&0Btzn1A+NgHF={>tT6Dhy1QFh+nbls zZ|CM*EawSTmGpn`E^YWr#QM0@?=Gddb&JU%V~CjIjI#`EMZ%1BuS>AK)4XZn?Na+w zfexplVT<4ab8hOyq=TVg6^L%R0=#!hw7$~quA;rPk~w7{E>*LH?g-$i&uYC6r$2{u zJw_?>71pd#79+!mkT@*EsLu0@`d6Ed!a{R$p>Ai(YkaTxT=ekulqpA_&gVnnUmIHZ zV$V|0_2`>Z(6tMQm6C7X6p4|+E4!f$gU62_XCMde!MXDwApPU+7aj*U|Zlacc_{n5Oa-)DB#6f(Jg-s#ErK)uf=L z)tg#w-Jg@$`jn)muWn|qb@JVRT@1MHelF?l=gk(Oa|)5ZJB4ZCa62+KM%EJ4smhg_%!oCDIh>up=cdR^cAOKxvb)bSV&NXKHQn)NRYc$5AT zFN(eZlKS2_wODm4Rl0OgrHPesyB+hMy)%mMrA}1wFm9Z6O24%whba#Z54~}*amZe-A*f^VUt&qJh{22YHa3aXrw!3Z0D&w znx`%N(Srr6l#Fn8k~2}@jI(4Pekvv^Km+ltoHVmXO*(dWI}J0!x`oA`_MP?A7B>D^ zlpBZniZg9i2)mmMcpP)uy$4Lub=$ob?oBsYissJ2RLkYc>*b?ovj9LK9+l_oeQSFs zn{^|I=8hQVgelw%2oX$YIK~HlnZaO)(N25v<^b4XK#X<4n4d-hjHgGjQOnhEFo zrv;djGCJ31u6RSleh7}%?)L60f43-jTHbBN)PQsHcEPOsjRw|cXOC2}f;PzlR+JO# z_hLOOy!f2*yj~l(W)k@tY-)@$Q@6~y10eRu6?LmWtWlv2rF-ewUqWF?dnwg>t6N_) zoQuJF?x&$QjIOTYmc<;z(!ATGS6qChPFp`P$F6Ix@CAmWdmf>0YL@qQ_K};3;+6I? zpz7<9laBbQG+#7)E1huOTWZ5{5>VmFzQ+v(QaN#fpaamK!}RRC71?3FkA|&7O80QNi2{zmH@`{ z>Cg}V09|@GdQ+WOC`(17_iXQE_wBD^3bTx0l$O0UxocPOuCt&)XxG||QCnZf=G&O= zGLj|%$lJl{DKyUuU2Ae|w!X8JfD4a3cZKeDlad8)c)!oP@Na|+5<-lyPTM7LfCDQ5 zg2ZR1YUQQSn!-C7+UhwjW{IV5Amm0%5C-A?&-1T83tFvsH3_9rxh1Fax37Jdb6iTN zN8X%Uf=T`M{{S+^ijY7S<}d?f20ntQ_=Lb^FAU1+$8Z4h4{=ehnPYDV^X_DqBalu9 z{qw;5>z2KE{_FrK&q2j~N3&~l=3XKduNWulR+m-O@#PE2mOh(9wlVb0SJM~E)$Ey= zvYVKXzuy%MaZ4sh9ery4m#1AFO38rSziEr7`|DZz7_Ws`?@vUfBc=F_J@U-6>CIvPtxevh#PbbqI>y)?ip9Qmu_78c|*!A2`KmBUEYpTO>5=4_A0Q|0I8%b>Qz{dlQ1$k8| zS85V=ebuzvroWE9hph(snSntuw96+QG)X$VYtHyJ9*K-uNzApFAv9mR3O z{g&Cc_`j`sWs6Kc4c^8Z3H_)GFRu}Qg?W*E=mZv++7BnXNn@8itoTY6L|Do`awj%TldK&W#q; zrsSTTlYgPCAr_}K(%*^1+xSn!n&6uIPnJDmSd_ep4C^f6M{^lF$od0ZGOM$M+Dj9S zxaU5Y^{-CwmYuHnXI{1O73?zUQP@A4(oPI4zytEAY!=q4V~BXH-A*~N6yt5H;)G?b#1?zGqWdwxbzjCoX^jlb!Ve`9!;!`p2y^)aJ7iXZIn zJ0pO@Ahc*nT>k*H1y$8NDXrO9wz+ocb!#RzT*E5Nj!OVRU`Yp_hnnrYO)r5wVXj?k zzuGqz6JN>Z$-Xe^81*Omu> zPZj4!9HYrnZ;`gHr53$cHrMrT{zp@w-QyL^mezOt4r{{x9q})TZ0{FDy#_l7y!qlY z6e}N1+ezdD!NzN*@cK^_5!=V8*%IKWFFL3spHszM_%AxcscW)~qMa&0c7k{;y{nM2 z)9eR_XOXWXxSa|lX&7u_iQGwG51<@ZTrpFIF0ClLTb9;WyO)1Yk;z($bZO6h`t~|~ zX3tg8uUZLqNbeYD5s={lCm3PhjMtxB$-RjUft|n;&QHB*TgiDFscDD^0*sPI2SNA> z%({YRa!h3Bob&xF=&8b|vWlwjF1?79V=sCMj+a-|Olb^PQ+asWN0`_@tv|yGWASc_ zp8AEiKEJ(QYLpi4RFI_)V~(}6;cI!<#M(iSZy(wgxIgyUdQt2WSBkKjlXq;rT6w?tO2&b`-bk=xs~$A`8502<$}kEUHL zmjM~p8RprQL6+OQjt4lcJw+r~+TJ-3wTpRdu@bXZAcEF}0saxw?C` zhJC9nY83wMv&y!5bgQ2V=drr+tBa_Vi&*Y1Lbo8utO9Z08;gBF^r@abQJsEe?m`My zwVS^;`4}uSnx}2sx8?q4nqFvHt+7$9OqK>^4w2kB-c>cCZB620E5tzK%gxsq$Co>ERv1&#;&2c|u%yZA?`K9Az~ zV+Vb`^}WPbQMV}QB6k6op&3!v-n7JfYFJrUm7SH^wwf!nzt8Xre3PoFuCCWOpAGnH zTJf%|x_+HJa%v8&>a)qUm>|i)Zl?n{%~n1gB4Y#UHcH`$Ig|hhlqqg|7 z3_5;N1h~0KPkn$eU(&qw;z6xQlP?W&`6OiHdCgrav#E%5pzR)5$!XoaKLRBP#+sW~ zcWdsk+3T7oh&(r;+F!*azhjxU{ifP&_T=M$8Ztm9-xZgl_%l+}H2J(-)4?X6Whjo} 
z-cTJ~vA1wM;N)}CyC^(R`d5XujXHRT*!55nJ9Zcu0ka@p-9QJw;ZN{t#-9tkR~^Hc zlHy1dk_l0UQa58K9Al?i;j4RXbmBT^L+RFTU7a!<8lNb=mXJj_{~%f!0Hy@@%J_T;$)j!dtSijPQJiE;zF?gVg1?OUED z)2t!!F0XLq*h_tKM*x6)*2vFnR;{{69-9QVkbKPK7Cev#_}4x+4E>vt=;5&{tw^Ty z>~UHOE4CYwPbAidh~RsBCS-O|=m`MS8gwmdsZ9foyIhWbyndC?c(&jEB_+V)ZflXR zeH|)4xsrVkB(}7@)o#|>(l)rBQgDhgPjmDY)Z9m}*%r~Y4Ot@C3Opu#!A3so70Bb$ zHI1ZRi$Qq-l)!Hx@`R&5FU%N&j&`;-wmTYwQEEi=%G14dlXOv|t^?Wxo@Wee1UPv}AuA-8*I6!vNfS4=i8`>2s*@6llpLr54)X zRm-R2_zsRuw%8{HL+IlG6Y3u$*F=|RF?%(+awWs)t!MD$$UFt0a@qk_^CSt3&WM^r|PJWfx zUHCg&i^4YEF^6r%gvtSyIRmmB?IqXm0k~r%b*z7hA>YS($((P0IYxi&n&b4Y*TXvf z?}v2DDPxz;vU_%lTUO;HMs_EI#&S>cHR9Cv(!x;0Q;KtvO|7qWdtdUo>)}e{BQqwLpIV_DXuohfhmrb%x)TBs1pbKe8>6ybYp`3G`sB&%nfnxd8yNIpTh{{VD# zroDyCd0As<@z*u$II^i$sTk=E4vIVL_khMPrIk+we8wOT&bB@siDLMj;b`Bd_QjTe z@A`DEG7C$Xkh0_gI{J$A4~ABsOq=!+g`4pZr5_f=E)->vt+RVE?KY!P6r+9Q^VdEzwssL(sU0KM>Wf%xQV{g z9_2xn<&ZB=dTyqW+Fs0z(kjJwb0kcl5PZhNs{@bjitD}^TfjVD1loK?-DJ1A+w)|g z3F>>3ULIi@)Tq|4w=7#)D{0@ZzFuc88dT>xbo;v;K8fMWZ9`B?D7DKkHOb-p)g+Ff z7Cu{oD3CO8bCc;k`UyvEix6PFoQR>(g2uT#B(Mfg1< zE8$MC0bGCyG4-xIbY}@uPFLoXR?AE8)6&Ow8eZg@TPAQ89u=2exJz9#RJ6LglRbdkodxGfVMCt*obluCLz4?n!>mAi8N3Z|tl~zT-6uWs)t8Fiw{*@|RnX_P)F#q3t$rajtcEEPU$ih%rv!b&`c)N++k2>{1gF^;vZ@RKazHuH70~#CJqqW?+QrTF#H{97Tgfr5Jixyw-?)M^>@!>) zwxos?kI5|v7@Hji?@!Ak-nugRPOM=iD=Q|`R!-^dqS^Y~%M}>HPu;t_w!OQQv^_F? ze^i4^Hy%n#-#oGsQZyfV+t>=%@g9e+_zz#X@cqTfS)tl@!0Zv##^qpmI3U&2qFq_& z&TPE0`kQdgENH^nPaHD?^S8Dzr`Ej(#pySSd=IAhj$o~;=*lf`in6;jMD0h)lbjr9 z{qI3s_>5wPCY&2ioSTdCeIKKK@2!n#QRby7EAu6PmtVtE%5z*N!>hjbB9g>M| zu4Ztt#HWB=*!4Y!Q`)X+z7x09?iSNe)-0|rpNQdOBwO3EtH_Fkp2IxXQQ=P$+i1QW z)ciND>Fsf*YmAZxUKl(Ck5G8sp1d4(rrPOW4z8F-E~9&<$j9YrQC$J;k`$5o)|IQ& zjOP~_B(`ci$?4YG=kpU{I z%%g5sBc3bGHA{1Edo(_B+%hp%b=`tN!R3G_n)huRM!N97j;yD@zKZ!TB5B~gjQ;?v z1~Q6rQS=n)%7_w&6dem#LJdR$CfVXZr#)K=yc-mPK<7~ zyI+xWOSHIs8eLH95v{efiyJ~(UE7}|VFL4<9&1O${s~J9O*dJ&)NLWR)nW5tU?n^8 z*P?KF83&r=ZP=Y2;qkN^xJEmD$qjm^i!9Cm0D=4)133BZpJqq_NZX#IbrlumIvAKG zEhQ$lTXMJgqN$};jFVcWf5yfxgW-KcQL!+1e&!8IR8rndO}0X#leHuY&5!nTOwfEU zed7z0VW-_i4gI`s<|%ip;SV?=PER?Pv7X{9mwiUgQt&q9`K1`a<2A_ri$xlK zp(0#MJ;+t^RihDsmK%xrn>>0N=spp_IryL8_m?G8RkcMY+C?}YOxLxArzb1H`>S-) zS9VV8M0r)@lC+ku&;A_$)A4VK_j3OL!pHGMR}lt*wL6t04ui}!!O3BBXAFfvWgjpH zp{~E;bdUc43pd3PmOBoubCI9=Xlp7Nk!OO+@ZtxM2P|*`lY@iCIL~_Zp)Yjt>P_=m z9)GIaNpUPL0LILpu5nO}I%Y$F!~xh>b8sy7-2ILuAzcnjNs_}4s=Zsxb0k}@pprBXK`P&mzYQEM>hx(=tM88arXBaCea z}nKtZ; zAADf^=Q-n^ojI(l;iFkbGw9R#tx@UHt;=^Fw?=-%xVHN{x~Ytly|a#fnKjyL+Ks-o zai`&nMPqKTEX_j-xp4dww;p$)|m`)llXp^E}d9 z9IoxZ#q2wb*3~f4ok&)d?;G3ap^B>G2|_zFO8)>;5qLxX5%@`AHPN^%poJLN6@I@d z&*4ybV^VDvJxXSi_m{SfYKj5R&O>DO1B_IEYKB`$;AB>P*g#)y)xp6BJe=dXu3A}B z?DqKFv$-N)pQh7}ows%8rE^a%r7Be^Htjcew)%gOv@tVNjMc9dWNg{!){(^0Ug{A- z7-MM<&pGU;y-%&(XfkPbI#!a*?-R)-%t1-{x{=9Mz$B<@jh&xtey6 zsBHR-gH#yp!yo>OsPg+p;}#mi~1Xn4|kn$b5EWo zL$IH`7o3iSmFF4bHPO!F6nYR&D(umD%*B4_$8q25T^^fpCZBg6p#cJPLYc_`t9{@{ zy>W1&{h1^TG9@a@$aL$TR%OEGEA+sG34!K3^02Gipm2?vA3NgBKGQK*srvR zq6e!Dz!q{i2LKMdoMU%Wn%YaHw3;;A_I8-YWxx`R!Bjqa90QE^u2Fu@H3fh+>vJew z#N*}2&p%G}=KWPKaY^#7ntpb_5MV9KB$sRFd%i$b}&y^W8T3MeraY!-Zlz~|~lNgdA> zmX`JhOfs<^Zou$UeyJ#GY``0k>X$I=f6v$SM zOzpdjv}$-c>Kk@P9ffLW8XP*ltoC+E=IyjuS1dsDK3wF3?s(4?Gs4qzr$6MqzG}d# zyGlLxI!#B#Hl8Z*#lDxV%BiSne*#e?HhvwNN0&{$`!p}} z2rkuwCVOu{%D{F!8n17wCx_-~w7bDH?3*(fWlRzeC4kRQN~NM38?Uq`ra%c8C`;}6 zw*Y&L;Af70l~>EKeLv1s3m{F~yw8|E>;ws(n|aPZ9@VSD-ag8t+@sms@zUh`Rn5&6 zW_3Og*J1GLYE#WF*>5dPylSA6^Drcwa0eKwwpOs)+cYv?!vk@+GD=UcWi^*FScvVO zHdSrV{QL4;FwC2mKQ6}L91q8>blR}BY28f! 
zGL=Zx*G`>3uHB5~DDu5``@_MywXcVDmDZ<_0~AuC#sMr@(5UDzI_IrtX3_A3oZTH-sDvSP*F zIaQb|6PDV@7+{{ADs4DhX%8jD6MdrUL@}cvAtP59WAgw>BaWECKD9MM`-R5K0uHM-9tqYW}OY~cH9#BGff4t#x0c@OP_u{r#NVt1B$+-5@Zr8Q#^Yu90 zW}h@tzjF46c^`*0i(PJ4<^-2&fOj*xkU9^3d91A}4Py4{;_@r0S7F=s1&D-oA!g2VTpyvWYZgPwEW6`m%I zI?#s@i&5Zst;BLcK7rrjB{HL2AypBT14v0Fpup-H{30^aLGIolgQ(@QCldz&i??cQnxFM zu}cEPW=0JsWE|t`T}Oehg~yD%JhFVB@ieo*?f$(xXZlwjA+?pSnovGOQvlzLu_Hgn z&lx>VD@VX<{{R)ohTA#+0Ew-a{*Ox4-JHs*jrn4FMy%6MuIkqJG2$n_xf_%LgXXpl zPkN)_=qD1y;Rm^koc2Ri_Bpk_JzpCc;;5A;JmeuPn6y{Goiu;Zr?8$ z&U>E5y6Z(cdY-z3DRMI(#6j0zj^q!Pc#*U21`TiP*6nlQ=~x1B8Rs5?qt&jh<`%4w zphS$3h8bqq8)E_Z_Y%Kpy$gVfHFzQImK6o3oj~lX5Y4H z5oIs5`D#8!*`9KJs!L$-ZYH&w zWV6}?A&6I5GFWzF&j-FZ`d1G$&moYZWKiTN-JU+ZYUM&MGK+1wZvOzSKLJK=>7}vM zYqxg#rn`Bq-bxVMuw_;~2vEP$wC-fIZ7WWXO}i2pk;CCg87xl)fc35&ps_kIxM#)R znnC%q@_fI;ocAZEZne|IwlUjVLI8>;nSo&Y$&ZXNJnbapdwN%&R)k?rl{hw(Hv6o4 zcuI@f&U4-W0G-F$Yr3|OZY`}G8*7n?;w_Nb9dV3j`PU;jl-#|;6yGbC^*q+XMPViS zt;?;vZEnusKJb!_+f+7Aasr;Gtzp`tO{hYbWGqiIte9bdrLb_>>^tVSr9zZu>cTdY z-^|*!BAl$6dU=!EOgzjK1%Uh6Bl!_q(rPhCvD?CmOk?k#n6dY)n;TmznI&MMmwKwJ zNK*)Y^MX$#9@#x=XyAeyGbkf`Vo#fP4nqxuE^=}S=rQYE(RMS2WjIB`D@JO^efCs7 zYb25$cAQ~*bH`t5?0z0<(%ty~0K*Z?-8D-vqqq9x?b^K4QG(Sit!K1t(nz_O2F?xv zC%i%7*?Yybv_rPJgRO~=A|9c$*gZB*5wBT57xJ}jbVp}m%`D< zBS+_bshofY@-PN7(-ox)!nXF3MH;ALM63ub21YpQIL~U#)Z>bMO-zfh7~Gh|nFWv^ zEEmrk$QkF4YVxXKDb%YMJ#DJ>N2MC_jTWQVLM;QtQ|bC8?}l|a+UH5WJIrIOa*yLo z@DBq&LFrU9>)kg&)T}S-gjvaml`ny`Lg7#dP*>;E(n+#6IC%!Sb3}-xX*P6elY1VREEaEXEA|nPZlG3&}Ax!WW z*B!d^Q&SBE6?W|#S#7r8(=>$KrDU~cef+U%Hy>(=8aUJ{Mvci-QH5STE2GdZZgfPu zjfa^H{9atP{ju1ed=po78yghANp_gx@=i!RZv!J7@_F^D?rhdu_K0LgR6+A~1U}>U zhHygq{W-1vuS%^2N^4C&0|`!a>bXVgD7-LfX`<>@BI?t7%8sXG(Ys(}TLfbRl_LhS zn@ZB<`O)s#k3?jRe>M((^cvE>x>>9uNR%_E3|KxGoSn(HJaOL@%wICC%&}TKd1wY3 zc1s>Ie_n#AVQIm)2G#X`f93M9m03w7?V;UTTDRE+)$Q1f+67B=-JOaD{{ULKT~6;v zx^m2JEy|Kg#;WSX_Zej)@y$!3i)Yhsn(6k)zv|BHeAqokdHjuKUL~2aG7Jd zXCwwfncxAFoDQ9Nt~EjmqvX=vj>*gSYnxV9y3`__A+xuL!EFgG$=vSetGVD4jsP41 z^(MNzpAj8O(@fK2MiA+d2#~%qbBvq8VMHNgGi=->-M%mR=-m8=+m5|B zq?E;Nc`SCsG)jI(2X^7u{yv?lR>8tttu14Ay$Lk)75l1 zcN%rD#pJge@%Lkm58z3Ve=78khdS1ssC+~4Hl3$iHMDkGoFue_7nR;sC7T>)j0*D| zWg-`{i(&haPSOtSjGl9r{&m!RB2Ydlcua%;05Mx%{TQ|FM+}=)B;K2L+TTB!;m&$A zmgoP}@ZXBMbieSi{7)(Z+x1J6$3gqU-ne-k-)hY(76S!BgOT;fuTuDnf93o|@j|vh zT~6eGSD&01m;5_BcLDvKMm9{KzL@s>GF^M zDz>$&8w(E*U+LB;Oct`3l*)3)Zy6*2IX;yowwtEQqv|)4i4E1fkR)4JY{a|rSYsy} zIOn}jv-ec5`4!5qvCWtgX2XUAciJ$2@#^dBo5_U)o7Ec}1so4tW|u_MEG};v3;zHx z#IQ>!^4KeGjSB(DJRD%v>v??3P26&;fnG;i;_qpb7v44l;9^<*p?= z_)t`TbXHFe=9Q-O^#1@OdTz_%T9ZdEoM9e(j$|bB_el1@3<&Gb>)NAprFp0w=~~(au9v9E6_oauHy5`P2-9SX8Y&PuIZ}A!lbir6A6uV78m-J48%k_r zZLzh+H?}w}k%5AF9@WqFd2_$sKfvFaTHL6UZi+9a5i^*w#Z|@!d;sB?kT}8bRBmI^ zU0BAw)#`fRg*-c@c(7So+|7TYOp*kc6?>QwY$zmyGqgLf=~`W@2cPOp@IcZoRi;Z{{Z5v ztCLFb{QAY`hqQ5X1>LNZEH5*wq~1wCD`7!aU^pP-J*oO$m20W%T5g+kul8H1QZ_J< zB>A|=$Ri`tx@pr&H|BbJ9F^yGqixO>cZxC*8TSF%NB;m7QM@Se24dqp=V1Q;;;Um* zv$u_*SZpq=rEIecmd@np2pd$6LF?AGG|f9oo5SyYacg^bbeF4dvekwHx@0ysH_e_s zDYd6H-{4$VOCEi3wlY)~3|`gc%shX<)~{-INo}ZI+9QdQ)>esR z82N&nXOHQ{MPMt!z2N!Wo95Iz)vl6Quk&NVR^`@ytF;lzO(B!F3Eo-^sm+-aKQ?f}ThJ7yI&S0`j-g6&3iOc!zY^zTaT93@Un^B$vkZhmF>f4rE%AuCBL%O=e^Ue zoZCFa3^quQckmOPryUI_VC5}UN1#ruQ`H={tD|XpOV6j=%*N{8R*jiQQoMcK^!;m} ziaQfJtdhvF=a$$rkEZJN-x2s%L(}|edujIB^-U&3xsn^Kg_Wka+$4@p2;3wGIO?TX zb+0_qwCiiFQqCLD{{Uy6WJd^a!CinP5(vo88Lg?%x@}FryZnrOqPoz`dsrG$u(XXI z@UcKLJ%06ddU&@sLJYAn9i-rX9qVdK4F^!Wy3w>@aRud&n8oKhx=gT=N}MPv#X%#e z&lOX_))3z7@M+hgMz*~y50XHWa8g$p2h$kEb<&E8z2D$xIbT~8F4icrB6iC4`A7KG zRsh6zZW-kKqo3Bcbt`yMSYv|9=E_zDoo-G^QgEaVszAs+Yf{5V(x%WNy`I|o>gDwY 
zwA*S%6-pz9!B*!iMh_fTva*jo-M&XeXJn+0J-A(j?N%Px82)vy;YR-eikC*@0h!;ub8Z7L^Bs$0452~K(BxNp;*0&BE!V>m0MzdX{zrPi zA7Z$?wvJ4Ba!82ba2pJ8aD92MM@!whGxpkQVMewD<;;x3ra-{_H&QS_ak;&|QU3tN zR*n6&q#C{Tt)gNWCwP<0Icy^p1d)N0jPX|E)3oN$C%$nhyi2r!FvH8($-I&=obish zrR=PezvN)+Gn!~4mGVP^zwC@3{Cc*QI8|5eK44b#QNa3ctz9Edu$bQczs(}r(o2L9 zuw1%y4Um3Q?af!2*{yeE6ONe8KV^HxOY#@AeNlrYwS!9M=LwVZ6a?exxK-Q9bcM5u zD_r@JfH&;qp8o(ZADOK!P76y$YaNBD`vZ0KSa|8i(@V1JHF8C z<}&cXU9s2L8i`SB-u91ALwZr>Z6uE)xwg^8uqIhN;>YIO&(N-V4`cdNI(!zbaw3u3 z!gISZgU9{#Ucsz*OTyZ3iSKMtu24IQygj0E(|tj(r;OuC(n9CWh+KB#;T6T_#AgybJ(Uwv3LT^saZqS|zTju0v;i z9IphaGY_+nBrK6MiUvXIR|k&u(;9Y-`JSGJ4oSU^ahe#|ByRZ}aDe{+?A0`oBOoqc z*Y1vg@#@mQv$uv=R^GVS z1!DnYWqcJp{QQDI;)fe6-Tnr*jkP@9eTv0K{ma;>$Nq$CtMEi5c+mR zgsiKW6=#_hmPLu#71yH$ILP)rYkR?eFO7UVpZW^fH&KHK~o9*P;S(4IQRr68K)6{{0ewnQ;EDWjd_c%%pzkX1ok9)G=71aJV0bkYSria7i?SE*P%ql@3Q?{BkP8IxfE76c47 zM^1ZI4~Q&LZ3X?P7$fIUGmmlYT`*1FE=_&ksxxfHkNYPuAdXK&1)Re8AxxP`5*uW6l0F{&CMy^aJd}; zQC0PKG^D20y+6q2Qr%dz8%t=AOCy&#`MFc{D~89{`OzQtGhapahIEH3xfEm`Mn>|$ zfJS+(&kfk2cXszCX&IV%#Hx|S3XGQ^so_RRB>D>EukN(DuWkmKBuicFArs@w z*`7BMfuCCCSZO&m?H}N3q_p=@Zrya9@_zD1TmmSlQib+H_Y0 zi)*Whp_P!~m1JSAef#=UVjzO80~{n=>pGe zk;^24J;0N&WDlm}U9PL)%?nJ|^;<=nTkSC|X1dev6ofJe$r~i#{K~+qf!t(pMP>L$ zOSQb$Z1nv@f7$P5iWv-K2LaeFRFFnSbIo(h4&6CgZT;WmbkVAm(`e=7vW{5}){|y< zB>?08`lvjsXyh>E$tU}R{{V`wL-I6jQahE>7JF-}INDEy>{YQZwj9boLr%k9VVK zQnj_@*EiR5TRXx*NP-l@;Hl#cladAj>07jtQf>YPxYM$^9 z8G-)*3md|xpZN>ff9UkAo2XV$@)nlgq$xvM^kXf-*CnMRZC&?e2Mta~{S!4cQ^x`)&gR@Xc1g zMf+Tfk`8~mG5-LeR>hjzHO!(2!bTj0401pormkP@^i>l$W3CB6&pu2qAZx|;W;AXl#o$nPV_-CZnAVPR(+R-63JrHb>)G zr+svM*1-9C0;0HUP<1(NC-{Gn+lSFPSX;@o`-^lFJC+4UhBIvSR+lYxw@s z(kwY#pdTsZWN$187|GA!U8jd_G#yaIXMF|ENO>-&5-OxnrzOBDdJj{{;=JPO=T4gD zZ?OkUeeLBe#BU3pHxb5h*0H08m%Sfo{{R8insQH-7cG-X%(;?B01c(bmV0F67UX}C zRBmC?op)QaZ^_9YF>k=_1N`-_fOvk{JTNtwXOQYq%971*v}{W^V-mlN zi6yr6`mT8V5BSyqO9r?Lb1bPJau8?#079=()HHn-!uM3RS!BGiiq)?4>C}ZXp=>CA z56rx)fzg!qu4BXcEsmw)3k^S1^FGgYCzj?S1B`=$2?TZZ70X8s{pZa801xsyYE@QB z#~l@{GC2s+ZS^QW_^Pmx+oZlwXU=i=NB;m7U8FYJE~#m8E{wC=Tvb(yQj2sNtcd}f!yZjDLeReVu z?ILioyWwy?=>GuXt7}!%BD;ndV@Hi6C5XT|EPvkg0<$lzA%^8+wzHnhtYMxzh6S?R zgVAsZBZ4ceve9&l-6utJ4eMNLGfxDP#=yk~mf!~AUj{vDFhwrugOn=W()gyZ=$)u$ zcqpHZpnRgHo|lLXBL|~Q1x$y*t`ftDOfo7Dh@DRYgm>6UtD=5v&pJ=srCx+>Jufc=_y%u={#cC^%SS`UP|?kGTQ}+fTRahGJ+#O9|0kKQ8k0eNm_*pr~od z6AD_!ST5gH~UET&)sX%*DxDdiGnV z{yAzfWyQ8JBaInPTx-#rG-;Bqe)~)x-eJ=zuT{>cij~l{j;w*~HsY|tfyQx|T%TCE zG*K8SAywh2C`x$@YKnjPnUcupZrfK?a-nfM^y6yA14Fcj>-kXBAA|CnqqVj0g1%C% zd%}n5jTO~n2VC$m&UEdDSW#CC&SxwIs)y#xQNzBo<*V8ud<JFj!|Av1!7?GYUOjb zU_tqW=Z5B++ODOPtNk%nx3geUVJbmUN5wh2zMh>m6y6tw85>qsfNw~VR?ufY=F~{i zr7>zc3l&-o+~erLGVY)G)eYeN&U*M({^XoEalYxhW=MhIZY--XY-Rmp z`^r0+%_mVO<+ilcBJMZusst~tUD7PQxFcZ`ZIRT1gpB}$5ASY**;aYAh{k&Qi?Z=D z&+o=MWDF1K>~=u3dAnctb#&V!WPTcUSU&xZ#w(I;%jx&lH&!fRk&Ut;cN1)@Ka>{N z_n;j;zyGj4idF-~3&5QbE>tuy-&GlXR9FBjC#D3@B z(cMyj4JNJQSM5bS67y|JVMUFI%G!u$_avR5Dvvw293A>Fh3-7Y&tnf;5P+vfNPGDH zSbNow>EqttgfK%fXHCwp5nh%Tk4wfmP7#0v0Rq@Et|NNltI2;)ZzxaG{+=jDh`jNz z`nKAAecx`G7j)rgpV(sD%xgsb3(kC86tJpqi_8vL)kfadi^!WViv(C*-h|TT*Nj(VZ$zVme{G$ zYH-jasCZVrY)L7$rZJ2#;RcVgqq0(; zZcVB@S+GoKZR25DEeqV}jpk@>56v`n?gS60PJQ}hV`EcfI<}-(3&{kPN6tr)+psgy zTn1AxI8KofvtH63$ip$L92|%zryweDAUL z?XSVpGxir!mn7Tb`CQp@l`lE^N&Tj0jBylR{bkWwFNh)1uT3GcTox@!aEXCv3zL8J zicscjizOH%`6BPVYAmaw@(9;N7$v2x;b)U14=oxo{)Ae47qb@60W(()p7}7I**PO$ zC{GRJ;IBEJL7lqGV)Ad&8xSsSNo~_*5}cHpLEdopb8*sS3#o~UdOdUN-I}3k1mFgq zV&ZuSz?z*>?ySSh!rNT$c}C8@Ree0Wtobr|AjKZ_%=H~iM)Di#Pw4|Nh2bWfG7={>T#2YXv|e&I0T_m8Y)e7#jn`KD>?ZT4|iAUStOvrDaR z{I-YW34I5pYDQcOBbK>~k%~SFl(qHUsFk=}`O}+My;G_U;}sLh*>$I{&Fz2E5k}SA 
z7R<$=`VPBZo<7NRiBmsypE~v!&OrUpZH|P_Se3HYa#35LO~-VCM@w^3_8u5vy+6#=fK z>fkBLafNsD90x)l3kDxfE-*-#^FS=pn^36d?LRkY7W)ep2c6lvvNl)kA^-!;p9`7l zhANcyjp+rUB&-jglVbWA+dGI*1~;xmn;mmO@oy2pEYdyJi;7ehwrmrF1scq}+1#eu z)u3PBHK3+m+nxtlpQSl_49sEMy`t#aqIV;$M{!Dp0B)jSUnIx#$xS_ z%r}F%-+dctk> zMrGBuL%@Q*$xbUFIS^j6P(LFx@@lX-ZI%%Ev6xOD(@}%<}wNq|7 zFYTgP7{9M?Qi0!b(a0X{k#N7)PX6O=Tsi&4gzaS~fxcLMHZ6>0JmBCK1x^Oeh**)1 zD6YFHp7U*BIaCX6UXI-MQo5P$M z**>NmDo>EGW*Bssat~&Gfi<^h5)18yUj9t@(qi@~xx3Ux0HUiR(yHas+9^9lr?SZM zSr%W+6s(dWHtS5NHJ_hnbulJhJUxK`z_umjazGDyA}ydm#r9vjC8-j^%jQ5hFZ@JTnr1#OYY6mCBO2JH(pLJ?DU&FJ-mEv6!;qg{8? z&s23|iTm6;S|>Ist9)S*y2W%v7&0FkYaQHuo@QIeCWxXqCYn5tJn=CFnr~kFfTtpO zuaK!`AOFo*>RUZ89ve1MX|jsgKS2?bJ?4Q*gCt`$Gk|ARl$fI}=+99JPA|OOcXXsY zxKzP9S=O-EkS-Qt78;W3R1w=zyNV!6Z08ZkERhyp9wj99wTUl{p7HO+n29ykzDYP^ zi1vcALr8yviI@Qrz@$oY2yW<@rZ;OHTU}?HT%swgXAUjeJDyz5ViFUsD|c`I*!PI}Ji;b^!v=|1=wa{L@Y+gH zcYXXdG{WA?jbHQNnqAzbb7Nh45R@%tacaVFsl1nzx!m$Xe+CV!|qkhc7 zoaK#!9I-0^F+upP%|uY=#U zOWHEY&4@+-zsr?9^st=6Xww@KY=7qQbnXXeaGALEg;M6Twz-mC7KFs7E7tad(SwEc zK6wODNSly}rHGyvj)o}FCp@fr)rZ|dS_w%CJ>c^x6(ORe9|Yz=Q$9kM=v zn5Xc7i9*=YiI>-oeAb}&qjie)&mS$g#csA8{(e(}j@d7RTlkDKMpS;zw~Ie{8E*a-ghx0l_)=O<6^gq2 zc)tsCSMUCj7B&CT%AnSbW;X&@b1Kkztpa`lWvW))BVlGJ!#yfc#sf(wY&DR4bL(rX z-M-7S`19teYWvGa(FFx+$LMQ8tKvpaT9+0~?h$~v)|j)Ro|p3A#)ja@k9Y=~IP+&% zpKv31z|aZ=K-iT^xU(mR0K#vI?2uh(szzUn$)uCfz|LW&VHZ%A+YBD9Zwhic_ouzLtl^ES_=NgV-+9n& zNFirc4@aDpD;wHsKxggIW=F&$7HSc)(eG>e+kYX0D#zMOr{4YZRO#|L50=> z7WxpU9}irhKUl@cJ8ryMH!1MPwd{(e({vf?LT31?1i?8M>hX#8Chck6qKUmmE#)nu z;Fe-hu2Ld%)nx(&nC!~DsRQr60bka^mP0SwLD##ws$>L!RdIp3ul9cN&C4nKAPVM) zhD;W*j%SJ=l2S5Jh8=r)V`;2jN=56%0#C6=O=@Wgc=uB~8<{&qn3A&v%5Chj2eKHm zhGIUN!(?w1Z0C~X$NheXF!yze2KQEsot+;ICg&pnu>!s>P{!-jDx-zKH#;JOcQp zivT*SV#{>p61WbzjLL;!l0RbYXMW4m`v`SdKP6qz?#X!O%3oG_5U#D9WRnhhl5b7auR(5k zA%{xpEE$h(U&b$O`|m%T>MBNaL!!_P0+&2cbjsx5!lY+;PP=o8II*><18kg|fe@tW zd`#e4w+9@&j-lt-7j>S7v}3eJW0u#jE)jZxsmHaZW*m(Z6aFAolp=NOIQ8|^9t_6l z`$KT&@aKbrM$z-1ndz&m?DPFa*W!# z(!9lKF?XMs<5IjN|c7B$j)?jGQ#k2*!H80${F3gH8HSuz$^cU3`HKuPd zKbaQ5kfVf?F$ZVmy~45<;Rz%vNq{8U-B>*|MxgLz(8_Lzm%w#PCC*ng2mPK+ zF+)8$!B2tDx_a>*epjor zsrq7ss;ZDRFqbR>XoqhIq5bv81}JNr1hw=1w5#S}?rh(~1maiSCmaz$>4Sr=Mn1n> z2TZJnEiZ~qz>FcfeLPQVN+1;6`^G?@=Wc@os>92evLr~wj!ExF`FuSU=ZkuN`#fA# zR2CqL9GwB(_yu@Zhj2&h8B~BKwK2My9vm;6SUxoxbxz0wGVCwGl#Uhd z)Yha5thKI0_e2)WfsA$IMf8v&1W=YD!NhL73V0@az8^nPR_9TT+!GP)IfZ;gjKHrU(tuW^7EGo|Q z0)`nr+K^wAk&tLq5>4F<)laglQ9gDXvG+PvIj$bI)#?W^t8y?ylXoZGj8!Kc$lwfR zvvgposEc{4m6mM7*!b<~3vEczl>9oGId-B6GY7FoB4Zt)it@zg){Dtb#UKlw7}%sW zcpXqSH;5Nz|AqPTj_r&@#OWpLSb;gdV~!OLQFlQH!%5cL7PGGY0_-*CPLC;Zv|)K~ ztXSG%v7JIv;QVF1S-v(Z|hoKIIg_B%!Y zQP(C;V9*>zP@DjO`7;8G!MU)91JHZ9^MF2{V>kDCuWKBv^dAlvh0uo0YQ{RX34c_1?h}Fyqt19E)EDSYS*BX~{I&`K4ZmEVkAfI z5`lw=qt2UtDOFj#&l;%IU8qoT_hEV?5mW;{x^;?^2>A4pmuXMFwiPFS3|>-X7aDi< zf?S#8yn8t}#{e|*I25~p>-KEMJ}ZUCGiT_T{9xxfYU#dC2K^Pvb<|rHyMyPudOuw( zPvUz;o*opXi#TWp&haUN!i8}N{ppX0{X%jWT4Cq`^h8XviC?c_@e8E$|-LpCQiTdS0p8_ z*(={KIVFRRIzL_@fLNn@rz2!kj8(iw0AY?dg4L}0S-Xmfie3t$El(hi6&sE5V`uSk zYQp>BN7*`yGS;~*Zd*f?yc}~|_Gye|POjXlW}k`6rE$B%;DYiC zApopV$$lqrAir5%ogi$>bovk?js7;d7f^#a2)|4@mxXlB{fp*W-0&Z};rgc3jaLPk8yT5o`uVlhE(!h__RgpTYA;l?k_cYq zO1eHdeF9Z+RGWS{rCM%D;CJ&zQt&7>$w{1@4JSX>_jQ}>bZ-f|GVi#PxObuD?2@8D zmN`@b0c5sJdxHO)26=dmR7b63&D2mt3v2Z~x;vf(A*J>^>TV#c70E&fL| zmy{8v(5>~z%V*ZsX7EE|X`h*iVh1AUyPI=TAk}4%eb9W;)&heT4NST2;F5Y-2lJ92 zuqus<=h7YbtJ0yeou2`1kM#S6ypDIAep>gzfQ!HN`>HmAW>{qbHM#QyO1D zu{~nBSwXEEv<{Db)rE#|MP2w42HT0zC@)mG_uRVSQ-zvru9U^6g()JhmH7D{r{q8VjQO$Rk`K}MLdU=%y<|u17&E2%l=cez8 zUx&-k#An+`EYK9%?ZGT_{r#h>OL{W(ws$EY5&b9SFoEP1S)j|N(NU+C5<`Lu(v>Af 
z$tCc77a2`IO`YfKk7%kfb%AoNORcH}AAxZabzYnEap80xcb59U4Rs(gHKB>%AKBv3 zQ(TFzfC)c@?#PGaqriIeeO`F5Y&6__9>$DrE_6v?>zx&q3rzFugeP>(@D=>HKW?$E zEjRJ9F|p*|4hrQKI1kcGvhbS++39N~Z)**2Uso2T(eXvjy(#~M_P|A8nfEIc#7sOY z5yw}%b~5!5l|EH>ooZb0`rGF2J-?E|5-KIEX~QqOmNPriBQx{E{y}+3^K)vsLHrt2 z`X-btCD~pZ5h{tderp0*4ME2C1wXsMvSFLp0s3GVNQ#V8q(>gh<7-!@x^`5w`#Rr9vHqu?ndernv5o zHC$qOhMzteDeh6TxsYuH@N)Zl^!JwAjmUk)C~Ox0P^z~+`P(8>S(L7gd*{F?{8@&M zDQ;kInzRwiLZ~Tfr!}@f#R*ml^XcovZ@aXH83)Pp&YQwNkLrjKfNG2P^9KRUx24-^Qp<9@Owf7vaDA> zc1UVWT2q&vOOMvpJU=k%Xc(KGVY# z?_`txV{rI$x^;mDeH+L3oq5lWiv!=$6FMu-8+nhp7C~$de$)iZ7p`N67D$o`VTw_I zsY3vyyRXJN7U`<7NxiKMO<8ttE_bl7Rcg_o6Rkr(=LefK(ZDpzn|8JtNoPBb0}gi< zk2#6OQDa$cwa3rj?~8_HH~Jm(ov1wIj1BDVh<6N+mw!2bx~uH+?kyCjQlrqYV ztw`kS+S%-^&yxa@Y^%2FHtx@I!1DmOr@s0|NE8amN^VK$Zl@<@d{!4$8%6+j!c#_* z>)+^Re}Zti1s;IN_g>>VxKYWkC8eCFasX98s=qYZWZF1o<)D6Yu!thvqIB(&^kT!& zbFGar#u7L;mS4upPITIke~2XjC2|~gI@G-p6+H=?PVuNQ8?)h17r?h;DC32_STrB; z;qH8ZcP<$tgsa>i3NI3cFD5uvQ($gbCaPQ_ca4TqLHaA%i>yMXd{dmyuzZh`Y`e-g}Um9#)47B4Gv#dNL)9NxAnBhI+={-WI$z8v+$MJ zPTA{v_auvIxVDF2S)fCPc@N#_`MV~!_>7>wF2ZUC%6*DJanXr^m)8`U{yVQN2}fep z$V+e)TzW96H!-o~c z6go}U4iABAN@VDhd%l-b>OZ9Jw`Q%XNoH)HNl&?FM*9VY8Iu;8G-+v+nXi`(vi0r^ ze}UW)w*!nMRbOxVlkCPPqxAH`lsiwf#p-RuO`xRown1c^VeJk&LFPg`?j{fN7Yr=R z9c|D?%6koEA*h}?SMShK$JZu+<#??CH==tc|Ey8`BXW6Xk$6v#)$Dp_q<-|*@V`e+e{sNrx`26kr^{T6)gfNmINCN^mSLV-}E?Regj>^F7m*sgj$$WFb&TOmn)M! zEmkHYvGXQ4N{}h;Y|CSc5q)8t-l(lB!lh(6$+=4l*ai3JI z0Ul0;Lj=~ZrVJ)ITm5P5>Q+GB!nG}c#rzkL)mqL3R}S@CAky10xe;XRxEqxM$vH20 z6B8qjk%m^y@!rbY7UDOr6vf_y^CVV^p2U=yhG%WF3SWE&0bC>(o@pY0SVlzR)8jUu zCI8OQ9cpi_JvMU^eP+HkrFlv2oE+O0DAwQGZuFh@tc$d(@B=I{#+2cQguuK7)v+AT zH_B+1TVt z)Av~k$Z#kVMGw9biJ@QL1KDEn!zjIn&~69BX}Eq!CVMGa^p0MUj%!?suvVsVm0RWw zVN7$x3ax$-=$wXAa;@UYGr;85qPncQYHCIoGEQjYCEN`)El~+`XxMyy;5zo4U*xWt zq@FAOP6)9_04dnsPu>zUR*=07t{szK6?L6FubG$hu6r&>YySue=}(2;i|82ejQpvm9Q0TdkZUHfcCTOx(L%VEIu`){#88cD(qp zT8LM=Er0-Q@;Evslk;Ymlx$dUIPwHWS{Sj6ctDiF&EAC}9`$|pX#um0E3z1_DPSKI z1YnJJO}@D?UQ~n`eI-}VEb0!Ozpvd)c(2WK+XnCUzbVrMY!9Gpr$k+ zg%a#Hd?<$3bY6&cYW<;CAS^7csE?^ypw{{65FHN(nCfRpd7lj0+P(~cw@IAH*fpo| zs5Qz7kmmY)4Mq8Iw=6W|{`E&^%k@}VSc=*Sp75ao7s+Mxn{nHL#NFKN%Cah`z+0=L zf_|w-jn($tof70dIwm@BONWvCFD>p7c=o)?aRLJSH1R(@NNH*$cwawJD+|i37GZDH zw)*bsxX{_Iufo_4a?xdq3E|nK;eok>hxiQ+D`pBRZrJlJWJH=1<1Sn_UfzZZ`^I`K_?bxHlryV8n`zQ_mcHZ9KVJxHmjZ+%yt zF$l_*jZ|kPVvET9{wfKEk1D*aa}$1htTJ_LrR@swfyfU{Rwl%x*1a(<`bed0HnJq+ ztQlHk8gX~7>Pj?pDUW1Cs;>3Y^!#iKUIuPB|4%HKb|y|8_oasK+e;8ifNlEEo)%$0 z{8g(`Fai*JdMW|eX%Y3rWh8dA@swnkDm>56x2LPbikxeo#-X&XB;bAE)k6!D7vK3l zdseeq30qt=Dpg>JsPKsez7X60z)Obg?!Obn|GnBApNER;_#m}?V%7k;1s*`O9MeT^ zaq<>-W}VArz;2@@Jl&Ksn}piA*gUdwXlG{y-B}gBv@U+5AN3>Ix(OtNQ6ohPn@ly=MtzD5F3~_GZar9d$d!)rf|_3#yDfAX_l8;oD&zjNb+;>Z z)!UDFI@GFa-g{1Jp4ysOi|9K?TAK2Us!+t#^^n@pQ{UoBoRGdHyzTE#pU4d{lfr(2_+2WvmJ)y8 zXAnlzJ}BNq?FNL>;BJHN=l?v6HTBP%p1p@lGZDZt5zVcPi#aTyCZn_y0kqa#c_DyT zok-L1QT0A1^8TL#;u$r$3@8Hdb3VGc_Y3=H`7HnMguN^)8v_cpw32}o%o z9}T~&ZTZv22N}NC)gV%2cM|{6o%;VPgm^kZ7k~$}H3z_R{GC#7AACgsf!Pvm#R2VW zS0?fj7wdm@`y;!OiNCfpJm5Wkz`uu}{5~KB&{3Oe6OluvB#s~5v`3(x*?R2vaH5o7 z4uVVg4d@ULu&e7zAsb{PG7~1xv25XDtjNp#N}d^}qMMKGVs2&X4to}K56mHeW8(RN z`$`0`C^49kF8b_DsPt%VWm?QlPmbmSZig&i^)rR12y-fqCCwG;pKC44#pub95A;FE zDOn%w-uv0mBd2780Iq^1R&;NX89z-6gaCd~c;D02>#{}FOl@QNrwqq&O7$x$3s&YvPLVzdy&e_YydwRE4Oy*(tv65!D<7c+X}GWZtAY z9%o+=xeCFLkWVou3B0mXn4a#vgW}S(nQ2NqYNASMA{oWMtNFV*Z)pC_M(aJLmBzlN}2fbNb} z|7L9XZxX(Lkh5soo0umqzSFz4_R8bGCH3D2NJ&XT1@DQ6+Lo^Wb$3nB`FhbC5}k9R~lKo=aP_z{GVyu9?G+?6tb9}h6o|c-IL*S&EyU7MtZY3Nr$vu!}Atah`X7bnhhY1Gl(>EI%w_On8sGt-=89@1Im9 z?C)ZLy=92%;U~KSA6?%2dC~vVSt`8G!_N7lIlqBTkZ_G4;R5}`U68(N;Q6xaKWx&7 
zbs?|N2Of?zjys7JvQgyTGW^r&$kBiKl9st8`eO>Ar7NxX_|04Y>_7a!FYDRZzJcLL zCf<5?m;ci`F_G6NFFbiOQ*Rvpg1y}-qtar$y0r6ut&^zvkLz08%lz9TOS2Aj3BdpF z{BJkCqDG?sPlGntI+PEiMAET200EqFZ2yCgpj-ChfTjQ7XBg*WA|$S2N5J+e?ab}%8b!nm;T}dG$hDUO&XvwN$ znHj~SQA<&==jPEN0R2?8+ik@UNpEjOt~^h8oBjD>&5h*+R0-vU8E&J8uCyfZz5hKj z>G3$@dfOZsiIKZVNw?`F_4XJwVnQ!xeq?`{(QOm!P<&GU?QcDkEAK{->Z~O;(s`zVDrX>vig6Sl>GnN z{h(a<7#Hb?Ljc}?NQ+`+sBP52^WV5f`$wVuO_2ZSOmD0KbJ>IQ6;7W7-gfLIL3c4_ z>FlebP&{dNG_=Rl!P`HzC8lz2TC?9{MIvVY4^T@31QY-k2nYb(P1H={E8TTb6#xL` z6#xJ#0000_MM6+hFKKOIXJvCQVsd3+Yb|qSV{~b6ZY^_ZWMwULV{&C>ZZ2?cXSJB+ zR}@az_bHM1NvG1?C5<%e5=t-Zf|QiftblYQ(zSp{Ni0Y$ExEEF61z&f3zCu|-7NWV z{{hdt`<`=Vu5-PZITPpmozF~yu_5pQ#X|}L0)hv6x;mzJG5xOElab!Fhf|j&cY*MY zDNu_5F~YVqOb z!uO`BR<&N|@3!jd444pka}vODf*%QCMy3`8xpXXDupV*W+2vw3{iy2MT*w=3I+Q|4 z9dbnJ&Bl*ye6ecC>B3{ZI;(g85s?zcL~5eA>}jOPSV;c&(!*l zPe^wOfPHmnL`sKzVU<_rSOK@w%S&)qo?PV1KsiN8p}%AQpjvKj@h<-v$_U>cAxre!X=uWR6x13Mj*Cv6)U(;_XVeX&!+Jup#SFja*K*h z(Z{aY7>vO`+>yFQw2C8v+zpe4%fknMBSp7hPIXf6T z-khAB^BrX8MNO~p$i5m|8e_D4(RhkHTd)RIrsf{pCO2MtAYEZPEI#_t@^5ZWA4E|dNuC9diCJVcbmjX2BJN?s^_F@yNI0N>VG~*6HGEc?d zZWM)h{SG^u&y{&K#sw$5GNDud5rgB+SC2b7^J>kBIg#^|`tfh|Zjataod1ajawtLj z2Y;urNb{_?4pr#f58QJYSY4wAk+2FzY%ib7%eeoU!gSoh4YZLSh z4O4$3L*CNkm_;lCTOw|6IusTH9Vf)w<}z36y+5MkH@+DVs(!EowElz3bXSfg34p}K zf&v%6h7B?^Fa#FO2S=>uxrPV-Es2-^9e$}k(A0POLaupo!5*;7`Yhz%JV{$C4hV7M z^+{`63=g4UDi5_MvkC6K$6RjjX7)6VqfYAR16iEgle^ixVrZdvj{t_)mB`LjgKDmG zZ2}-N&4Jn;9@tar{QH zop=yS-_>s&=y-9&P0(C-(2+8yq7T<9n;A0rgv#;oapOll&DA*+s-?Sbyj+OW>A_s& zEkP&VyGAtU&l`(6D@I4v%dYj+)zGmr1`3{tZhDE77BB%D|XgOb!V#8)!k~&ArE7KNci%CHwk>*6+gdRQu)Q* zmxTXYme$#Pthb(iusH$8e0k*6&b_(YwYR{+6`jRsLm;^xqkHNm_!Y>k95nWi3?Cd- z$I}#fe1fwZZ9iX5szceU9IAOma63hAORDVAnp&(e1WL`ZQ%$Sk^IdclT}#$3hHU+C zHgCoMM>p6I#v42r0MCxy)BDeDv{=1kkvktIM3p1V{K1r_rvCA@Q;W-KW9#WN@1;(*(;S*?7`zn~7q64^m~JpB2gD}W5ycyk z?bLnZW>>Onu%tSV+F>M~=Pg)9PA*Z}*Ygi8h8R6L&}nlm4hwjtr|{s z%VIYLvAW0lMr*?tHj!iQbO;pv?<9`i!P1CLuzBRp*I3e?AbobrJ~Pufs2apmv3F*W zXTB?m@*uU&NP;mHvqCD-Fk zZ^JdeyvAO3ax@ch!^B3w(~K{)`mN5H$N6qbb*})c>+A9u495N4eo-D5_qKUY`1Lm( z2;_*a8dKVOQ;d!8@vFR)enww1vh<3xVAVHF4RUkkATT7TT;2;OPM zDVNMS*1BG{-rZqAdU|?`ztJsq4iSiMBJTHeMmUtJCwrh5-|)!(qLT|IuVd*JX`vooA!!NWvCaLe2KQY(?)}x_tggpj zIm#(k-Q#pp9w|sXR0;498Ml_9U)6}rRXCWX10Pxro6rvkDme1Ykt4Ema+pTwR5Z^F zq2w#YPg6ZYkopj=Se9qJw~OEiMq~SpijyvlV(3)Vg zE?>fCKl6E5n^wZFRpPJlG8x(_XXbnwmRS%C!OX&h&6Im?Z!VDyh{HxLo>d{|Stt5l ztqD2Y3wojc+*bX#wKL)9*+-_pRCMxAkI)3#!~g?!a$0?mD$?3;6C&?5r#R0x~IO4#@ zc$$}9C%!jC6eMsPeUI|vicIoGM?vhyozcimGKFonMVNg3)NMMGBFZwNA6&5_|6(_? z?AU`gZ~m!{Deur>KnogdMYYm#+p9Lglv7a1uIwk&-nEKkdha2fdROqA#>OCz&YI}ql{Gb%(O-ptjnvY9(@ZXmLYUdPAgiHrE@~ zU0w%>$#Vh-C_9L33zs|fy$aimXj=&0YP#!VrkMbsFfA%oJy`=VnsUXyU#aW5)3EzQ zxt`6P1~yBWj45pN4(f{(eP1eXctuOl2k61Z9OYY3nkjwWZkLyPJ#E18?u%wz{ScB3 z3R6r<%c%Hitd8fCgR|Ct_NL{6*KpB90ky+%@#&z$kH*-Lu7I8Dc#z@w`AS*{t*kj( z#QoOujwd$njwk38o@?7F9(iTa#%(jR@LJYsDBa*}XVx0?mL_3W6mK6o4OssY^($0)>xCIryA&gZ zC-%;`$B1dJo#3RP?Dh$>8vPkA825J2qW&KqF!z>b8-)cV@5#qsuDw?4p}KgcdkjAg z7qXulR+~`uFdjC(@%s@=o{m1? 
zM-(-YhH~){%Upl3HhbBV4RCYffI)HS;V?c;^%VRLH0Ue74&j88QN=i`!4KSK<<7J^ zs6D3}O2$?8y6Mk$+7<);?0?9aL{E$_AVW5Mv4U%-v)-drJ|ZTZLrwZn6fqsRwJcg} zf(E>DTmm=9l@HnN*a&XApBwqzKV?7E7Aic3)qDAJS$?3A9gZH<_q+p#kTHJn^Dz8c zuE}j(vRx%!Y5M5-jJrn+T=>Gck$~Oo*z#d zg-FrLWu)dQjzFaZ1YDSNz8F7q0!=kJV_xP=T+Ft4d+QXZfSXDS0#n$OWKsz18!_mT zy)&bM8ZxZpuhTP1je=2c$GXKeHljnoDR0@T6x6rK)!HA8c+)Xb?g%MJPY7qc5$sdN zw9j31$W`$1#~0J>7cb9Xs*h`3o_h1M=yu$Jo;<1U;jqYNIuiAql0;%9-JhKRBl7(l zqWrm&n3m<$*Xin^+>U@{)6b65wGVsdf;I~C+0+djT0E|ogx-evfw)K|>#rm1)7lWB z_t7%gJoMnq$GAzjQy~CRiLK?-M)CmJ7ZPGlxc!b%dmezstqf3#qs3cZ84m*+T%!p` zI?Rs8ByYsUk>bzJEV+2Z!hKJcx#a)yml(qNj4mY180obeKyW=?stGc2nby{kB38fJ*RF5o^)pR8yD9n^ zKbBnZ5^46!(kZjn#y+kf6aECcw2;(7t8ZBS=urvzAT&^nn_JUDn6U=72{oMlXZAQi zwVTBDK53IFqMxN@j>6Kp(hpVjPCS3Z56-0% z@FQmu8|ly!U;A^7m#890aaC^8Ur!$f`j9FVSO|c{ZJ@*a+A~Mvz(XGPuLB!_ix!{7 zquV@3<)BI3w0?Xwvj1Tx*7TJH4Bmf4;dA;OhjX&{7e-3n;Pvw)QY(?(!jo3#Z^1yr z7e_CL7GaB*->eqs?2Em%AaR>FmwS7|ig5s?Muy7UI{_TAp?UGE8$uJN9JQvaykf+i z89Agia{Olvvh@2=Ppo#g2~&!86<=OZ^}Iyx1ZXNrWQLi%$)L})ZQLN3SOrv-kGvutT-h|t<(vtcfQqkGOCj$VkbQ_Jt+ALP z{g9?<^sQ4%*6ZVl49)RF>5P0LRjs5(x05@U_XbR>8J>Cddk>$pLixle#PXxyRoJoAn%9lQ@nwc;Q_wRG7efb#0 zv>LX6Fk??m7&QBc{U;#5$f}wc4HV(O$pCU3Tu@TdrPhGI7Dg2Dt_5YOiFo=4_7u!9 z{N)cnQT$xKeB#RKnu`mV;1~_xH?v0d$>^QWA#;wCOvbP!l3oMsM2%I!o1(4@g*XXE+tFQ zsA%sziQ*)eqz42-myymh{FOtiMmOgSG*+aV0wVYvOiGms+_7R!x{th-u-_7V02z0< zfi7pu8lC2^oFFV>{u(SdZD z-G%P`B^lc~bV2tusmd_(WbvAL!dm0QQ$pHLf=6Xd-&$e88*|%>b?6&}gdh&q`{Hl? z&4Gk#vUs{?YDGI-J?PkVO2|yXQ(i5tXDWY5=m^sp69 zaC4d#S4v-%$WjT18EAGo=OvYnlpt9{vmznRZE7*u9l~QjyZLT=v(oH~SM?(Xjp_z$ zMx=UD8fxkmjiy8s%NuMV-Vk_rgJpHJE445?>e(J@B-j!rg8i-C6kfj#icp@fGOM)l z(A?%$Fnflm`tS)Jdr6myoLVuqRkWu5Loqv(I@Xwr^%%J*=P~|pz1=9jTCcf{i0xPY zFQUH#j6$K|dWc(kXcO7+rpq zJ0MZB`gU`I4%kAkU0UCKgFxqT)(lI##dE+RKk;Qw)>=3^2NeY~Jk^YarvHn12BG9s zfnJ#-4pT2eGqrvyrsh>H_x<{-o#0MT{kHO>k{(}Jp?R8+nJWngg(#8Rl2PFG9RSmIh^CBVq=qY!~6wgwULM&VWBSjBga zb?h}4&)N+|qC=>5wLWDfLJL~iOZJR;SD9&0T@2&{v0UFN{<-lQZtdEoqO_SH^ad7H zW(|aH>Zv;JkG@3nC}E~3ZsFNIv9;FL%1hrDEA{}d52v3Fv#W<+G2KjUrd!*fSCb7^KU`x_BP4rZgaY)S# zHKC{DM0Lab-|5+n*={dTEo*|vQ~>{M+((ziY%!%p%o(-yr**04SKY*cP)%YlJ9URq zqMC^ipk?PzsIz!mnU z0+0_puvJOcNOQ@=3jhDp56k~=`s+RFme`!mw>-eEsh{Ak=m89M5L))p{|``00|XQR z76=Fc0|Hx4>4^uOV^RbFbW;QXB>(^bPenpdQ!i<4VP|D?FJy9IWovJ4Uvp)0X=QU? 
zVsCG2b1rIdXOz5SlrBxsE%>x;+qP}{v~AngY1_7KyZf|l+qT_3?|1LaT{Ax?D*wgG z$jn$(5qn3jsNc2UJpiJ#xRf{m2nYxu`QHQh{Q;neIGGx{n3@o}Te?^fN=V85`P~GF z06>6&|7ZLsp#KdpNH8!^P%vl+2yjSPXjoVnXc!nccw|I4cqDij7({eLBotIMG&EQQ z3`}%XOk`9v)c-L81oGb;P%tPkFep?w7&z4b-|{;EK!OAb2G#%pLIMCI0f8U^{SE=} z003YRp#KrT|1DtPAfOPyK#%~a|I|1@|62|CKLP>@1PlfMhxpw9z=HgzMFc?v0D#_K zl^K1|HE@H@xEB}ZGdTos&JE_p?APB>-*_%grqsmRwlp<<^GFc~VruI2AT}Ne^RwN!Momz~TYS`#xT!;@mgep3 zX^L!?O>MKG4~zn4&|e6dP>@5r@}k5NzS7rB z#hnJfDw9hMqckPknR-f~weUv~J~-EOt1OtbK~?Ac0}%*!Z1!qiyyXI&eJ437U!!G5 z;uA`3q)vQUO`)5x$>&QeE@@esV`!z%?xNGCUYc`!U%nxx@^o^)8_cpWHzX=Ngf5dl zu}sq5*1)6{8i9a7pOkxK*(o8zI}-@w2uQRJ*z8gXg9}Nf6e7D6W!5D`K*i;(#K$Pa z()PRIRejoqb&0~hm=W{zSE;7v(I8c(6Lld_Y$RlSRb(xy04mm^t6gus>Qmk1Os1td z41PRuW7)hI@|hde`r6%O7aaIw&3)ck1QLILi58u*Fq}^vE2~-!p;k z(U#B=2Fk#ZTdV-qde7vJiYEK)dUcB_N~X3!qVSD6I}C*)mOw5E6t>$4ipjOzVKK=WabXSF5EXVRfi1@hFmT5 zh97PzE_^#adSCO~lX6CmJtSY)q|j;BxeX!+NA7!Gn*$Rhsb|+TMAOtBUqmvZXksjL z*=bcn>gHwkC}b711*17H6hGpMl*=BTR+(2@IC_?_ie1fx<;ztOu%zId@!pWSo(JCz z{oH(S4Kw#4&AL|31f_qzu;A4dWqav!UBjY{UOKk$5?CY@{q3Albs{&ecm^KvzMv~q z{$~tpfs$RMscW!RyUsXk%3+ZnOH$t&p|t)BBj{8($<&1Uhs!PKr@zz(1{dC=HxxerR?yx*a`)YTp- zZaYk-Arm(&&KvM0;BoOBz84NtVa}fNyZPS!&*|5OwDboAgHmFKnS!7AO;#szAIxAJQjc2ahRh#} zBZ>?ER1+PEZgqkKhq^QEq>l^z~#LhSt*cUKGE777oC%QNh zIK8M#xbM~N$@+%)8Nc}PLal_R26e;*60Xu%S_Y*O-z@S(%57$<0zGUIi2U7c%(1aD z)&GC(`F|b3k@}^$TUa$e5u|h*xk)SFd)4>CS0x9fyRI-Aqx&GLwdoRa4}s+g>W`YX z$nkZxAuXLBoKQS7*meO$4z`k6%9#GRBrjdFx&SwNs%nH<^;Vr9%VOJMu3DJ0DC&in z%l5msH)XlJoyL|r$-MFm@QrCnbk%mt`>aT5Pds{441Xa13Bn<*b|~Ztmt6B*5T4dSBWCi81p z8lh`v*S2|$kt714tTfICPovm9ZR#7%7kdvT8ITLv6RXY%!7UA>iVD-)D^Ng0%SeE= z49)2_?NDiPTIL}!-HKS%`?R|~UYA$FQMSw+jqr%RYF|d{L%gdKTvgHY3I5^dJ5`0X zPft@|_Cl6^?DI!YN*5!NxO%!i+e86Bu~RgtNkjAK!Tm6k>fXc3Laj|!s%?divfR}61W3z?cdPo1yv znUUt}+=1!aXCM8C&gjs_M}9d{`yHAq8Z8~E@T{~)!O0x!KbH?i7r8qAw6x-&>QJ(- ztliw)2<1#k`#ic=u|};ux93haFQ0}%LsyD=xnd_@pR%FM4}w|yaQnZBeoosDo8}46 z;sKPI$>78@0$s^<7hj>%?#)xaWLtRM8GJ^z;M)}uYzi(-(bFaNY$X!dG^^hA zke07qIW}I#Z5iAuJ{ua7qE2p&DM?$75q{?aSD}AmP z)vB%P8N)O5Iq&8Sh-8Xs9H}yaoELQbI!b{D{*TEIe2)q4F8%b40_`ppCovrs)p8JX zy-{!i!-JROw$aKNvA8C1{^d_+u)&0O_vvgntPl+NCg|#|D3-+Zaf#yRRdJmqc{9^l zoqaqy{Yi!dS62Q-$YtT6Y$qwL13`*DpZywB0h& z^Njc1w1^hwJ@87NBwO?K*iJPC1*=x@#V;bJ&_YylLy)5E@iN2UXlV}3A(g&iX-ZYb z$Q!=aqWU^>rXpLm&K<`}?3&_ld(|(1=;o;0YX9uVd-38Oz9#sC0!s2SZ>ppdQK93$ zTHDWA2BAQBWKI9^(=z`0Jn4(~y6Xxj*JC|R-q5@re^#n$p70+(dK#se7M^lMc7|L~ zr7T#q&vo)!_h_=m`f8VOfU}{dpytC+Pz=^JhMpQu2w#X_+~lJF(ufR9r#n3g-7g@O z)uMHy7{g>+>Vt^w>d-RH`5!laaJT4ZcEh)6zSrm4*QiOJ|h;?s^#Sh@Y<=StXE?RtULI)UlOwW?E|gg&M8wJxEO zt~l*_Qv#sFm5yhW>MbsJVvn=M7wY}|| zA?Jg)!nop`z9p-ec1n>HlFSW5cxF>O6o@Z=qcu9YH_Vh^XQIpSF{Lo&*v%R$Tm9lq zWg;crFp5~x;qH{hm1`V=#&pE+qSO6v9-9`Ir~{>FWgXTs+#m6 zsJbIXp<%BPD~Q&s8&-G9ve?T#?xa&l`8!Vn3uoI-J;H7K&xz{Yzxs zICs=Kj<0a23OoVNAl^Hw30U0j;Jxbvy(F&Y(HETHw2f?P@x5%vt$sz=;DbCcBGEVR7IIEI9YJ@k7vgx zR}2M=nC>9`r#?$$7O+$ixL@#csYo}^tR~T z=9B@xr-m=gh(SIREv#@j^D&n{xrtkc_1d8NsmjLKxHAKC6s%?E17hMs!&zz?e4q^0 zYEq%bf7d6#4I7)~+c)M4siDQGd|}KNpElV^ypex7Hj(0Jr?qBe5k9hAbEN4dzq5Y| z;Erj=^3(R{7d+g*9-16Q6UmnBh5G`k;V77)u-IL_nq?fHYg%tdL`Ju5@9UhQSR~c# z%dS-r@(PsrDS)R7K2E;gtaW9Q+Vt5O5Q^1OT2dDs<1hF4ej6g~twtlZqr-a!efq!Q z!Bbn}%k4xKlZ8SYB>~%VJyVQV9Pf5aN&cQ(!hn3Unk`ljM);u!D47W+Ja=U2tA;zs0U4@C93cEao>IYtrM-nJO;P=b<*C% z+#K(fpX_nc8;%EQOXQNDU0Z7eL~x!^{oy^~=bX6f`MIH}6!T#kU@o4Iuz-NoM$(>`oA&S0>AT$C1h9RS&7!_ z66z6jGy2-~S|^Qyu=s|^x>!UW@7Qf33T$*xOz1BlU(bD&*znSv)Ro89m&1BS^QJ`H zw{widqu*Sz*Q0>B&7;oy50(WEpO0fR`M`}^P?Hqsg!9R2xLWXrPjBTQ>@k7%eFC_O z<6OF&J%*0SD#Q7Djf!RE*5hE*cwhW=R7tj)UXW+wE5r~6b-IFhktX#ptG#qR#~D&) z1go(>r%oOfS}A{T-KH^5X3u-#9gJ#r|S1e^)&< 
z@3)I@!!L)q*Czy)<&rmjsmwF(I36SvD07o1U88GDNouWrX%9G%$;ZK3J=ix3%za$r}i z5hSNJ?b;q%-gbMf{9?ryH{a{DFL~!eniF+xXy)~^pe;Y@wE@resyR$OOP?J#HaB<{ z0z>F{5FTkV;Z^Nyi_Q^aZ%Pug`3)eGK^=L$>d%KL)^wv!8g%%UyASnn zEj2p2)bYtmt!mV3z&SEP%%%84b4+b>$&0wmep8DQ+$S0;xSA+1F2;pP-m_{?_)et! zi9oph(5F8)>)wIx3fpDwW{*?O$ml^^3NnT=8-C8x|DJvU?D5eZS(>o1zW`;4STx?p zbC5yRSP!5gp!>@4rIRKNvnAR)`Hauzo2wnqSTA8%Zy3wyO2erd2pWCz+1)zC@Yuqa zSoD}Jd~h!N%ndRtw0@o4jmXbid%e?MfAD;9)XVH8sJL$V?> z3KHC&bXIOs5LD--J*;i3eS0nmZx|k6DeXfV!6uF%|)1NFp29{1fy`_ z%^=WfBevnBo5ZWJKu~7T_l9d;LW<*(pn&pipmUhy%&^6!Rl=t^x?V~u{)E->L7m2B zGY0KSTs%uNz6-0KtKJyrvTRQ=4oaNGthO;e#=r&{^|;Pji1KLur&zww_2pBZJq~CYZ2sGxC*fS!i{qinSd-vg zqLzcz<~q%h<(Lx4P%+Ra6A*o0PRPKd@E_YL4s1@0gCGxOyKlYhzWb=@fLf-J?QM+5 zu&FB&eCN|3Sh#!`+`7UPCL79m*t&%u8OY$c{>4J28 zPD#W@%kJm=BdcirhnC;jn4Rk) z?XUfijITQ(S$M7&87{SYEwZS6im-{=a44F2z^Rogk~`ITgu+xAf2vuT0W0ANkaz0L z3N*8M%Lym6&v87rJ9OQ7mQv3pZ7W`m{eUl3yekjzRe+SI-3&5q5bZocS(7Ei``5}T zbGfVX;-WHC7P_na7K%RjdYaqjBYQq@s&{XV9|x_5Le59m_3=tj1kc_wb(%^w>x@%V z*Q%3q;2G<)tR9tBWzRwkEaokO$DQX#d1pfg-bDvpIqG<=Zd77M)bxj_jFL zn5+yDs${`dj|MsRrqp+<1f@&{XMRPlj>-l5z7`C|WQ)6%p5{fpqo9j667RGR?Mb*f zl+dvd;5_-q0*(S+rVv3&6BcBO$}t6_t4lLJ!Rj@Nw#Kx??woYcXU)`gnKf12V{PtfjKnQcR8p*)>>WLfnvj zn*J3M2NFN+n0MuS2>oJFb>HetlEzcM&mwS{!79yAQMGxJnG`Ir=}6+NPPMPgcN3V>9|z2;YT<(TTVlxE+ZfNC5+t)N}kNBMJ<5{Le#d@ z2JbtVsM6T(czSKYa9Pi(iOp}G-A|n1%7+n3d}&nm(j*vnxv#Axt(*z)!{`}-dQHfw zO?uI;Uf5GYMsJ&K(o))6&~qx!7kWbk5{~iq)}{P4H0y}$<4N;A18HnJSlp|?5x<;*z5;RQa{o#@ESI>T-?eorVL13EtXl-%iKz2L7MvAsN@vj zNw5F0=oi0BnY33l7ZE~CKYsGN%Uq}(g0_T%?l|0TThd|`5+4YQ#nj%hm~VA5Jr6F5 zV#IyW9=3GK&X)FRb#D1F3}pfDrx=;D?^B6y&qF|<6UVJrD3^TPJDPWH^oMHWh^yKA zQEIFZD`Y(BclF$u-00l+v{7==CpTu9pI0%$)6q^R?MxIW<)x;5FW**hpCPpFUQlh@ zb-{(!dR&}5*PZSk2qP1q29WZupzo;uQ-T|!&p@_rfW%HtzEUUF>AhB1E$Ipib_;FG z`XetQ5=z(;UXD)rKCAubU+gl8)uSer){dSRVQ4nTL_s1nk3%9nu-ZMu@P(Z?_Hc&= z<&TsXZ-g;TRn?V*Y5*6){}e*iJK+!bmZk7FJE>?gQlWQ_sp*d&WBj5=Mf5sZBV_Vv zb>=rt>Egt5=!7fdqTg~wp1b-TiKbYFY?os+CAlIgHGnR}3f3qz3Y;}8v)F=|6f_2d z2~HM*gIFYnJ>XAyEnpj3^w4E`N~_^5 zkz+~4uI@@5@xW$C$ihl!QvYFTX91^OX9Sm?NxaeP{_sIubf)y}a&CiNL5tWL$vRLT)W;ta$LWvqN zQX79Lt)|)q?n1eTEB&-foJeJd%8)djt>K}k4d!PAiZhXhCK(2}&VKMdsF_V^sjMJ0hu!4>F11H*cR{&bh> z4{m*GMJq-%cA%?epr=I{afRJwrN!XJB?lQ8TV2KFPISxXyn3^)Ji|1pB{h;ojpRB_ z9KyNLSY3A;VjXYmD_;eTwA@@!#MR}uqShu~+8e6S4Yv26l^AMC;$v4@RsQt#lh_ZZj044R?xZEY)UkwO>S04-ji#kJYK||=@xyR|Je|9$$jetc$2sv%wv@zuEm>$|9hY(*VP((JHM zjS(%+kRtZDkSE!O4{K@JYl1shDOh0q7Ho0_CwVE$On(pXDhgAJe_76GtXIX9@VAmx zkaBBX9FtDpgvhPyF*9Xtj}J728DwJ2E?>LQ)ib5C!#D9#D0}d|hpslpx<;KcOQm0S zQauzIh0-13dzz;o7>4^GC9Xq+imTqtR4~T*v27ffv(JW4JxJS=2ne}E@00mDU4aza z`0E0aCbuI=BNBRWW4tlTMm~9%Za4uklPB3ZQ;6&$w$u$iPJ69fDd%1uuQE=%-WyhR zzX7GLP2A9Qq4ucMIgwBu(v^bh(l=AB>Mg-zSkP<7eXrp(A7^AIMv23Jhy7p>e`VQ4cqpuf8#(gnt@ULJ~P@ z_nxIreR7*;)BA__S{{-MgU)F|IL!CmzYaEc_9DmLzyikz_JH5%h1kB%*&?S4vmmPw zAs-Wu^sW9s9%BAp(kmVaM3!6n+8E*x}`4l}E zLI{W)@vXUqwb^Lec1WwjnLlY{YuJTZ6wTY1sGpyx{bArP@;dkSgw%+BVOza6sB@a1 zXs@>e70K}_(T(>zQuj0GgBX8G0+!!e?2T72m8^A`kV-f0hZNqk3Z1SJgCSYUOmVyiuZFTu%;IM{G#?=?;YTr&1$FiER z`X55AMU*r=GEdd<;FCc2yLu>;VDd#K`nk>K%m>>p)J>9$y6T*if(S{JvJiUP;;SFh zCgt?$|I~KTigLPNV#pSCCP%N6|3sn@LWD0Dy2b%rP##~xS%%aw1KS#oC}*h++cgxY z`D&c(c2yzw3|nvc7e35oPoe8pzUkGkiC5}@n;ZgBg`x>h1~n{V!`Ah2PDMv#MWuu&t#%eaY5b0ev>_g;#)HzS zpp_%awlSk?1$HlfgwI_(5rw3F+a$YE6QH|5;q|W_Y*7Ioc;)6I6UI~5M!CQ~H(}Sr z(VqfINpfm^MIg9nu_c1B0QpKS%$w_V=|BHk!?kslMtU9hvbSGCuw3fmro~wRdQhr! 
z0G{KcrH0Gt&0Ic!Y@s?kA!=cOdVOylbf2?`?TG zmO`Yar_px_#f3Me4Xn7EHHO?!%%!$LSTC9_1$_-e2@L#iV0e*jck#{r%)VQlwKY|> zhX&g3?cDo2W2d!peAb5qsw9d^#UpBLWto~1SS;A)VrfLW>(yUXYx|Eky;d5NP5T#o zUqu(ex@eNj;%2ql`9-$0y%(P7wOl{uV&IYVp)mZd-MP=sZbOobjZ;-8E=^OX1Gv{%1-y=%b!hWikAHItT>UZ{JT28# zn>r187Y326GR^cQ$2?1CeX%EbfseO7Mi=}21|OJB;JY65D|ALz0^1TY zp+PXCSb{(5sDOY9!0IX=-PUArM&YE_{zREAHF%{`+NB#ZrLd4^W-~FfbFQ5tD8{yA6n3I$z>dnR zPxZ=`p}u%urZJ;C6UYs(@fDbvKqq;LV`&aHUprj+ir!#` z;7mNS$^Q|4%ZdFcKKU}gbV_bUU2k&lr>o)aR8KR(2}jqN(!8#*CTk*?{p$U|#;5sc zb*2$3DA+hQ7Iqgn>1+H-(|+&ko@a6kI_4%qziow)19QY+#_Qf9So8G#Bl{g*8?Q;s z7yMV%^PDBA&>lqs@T7r(W-L1D@wAHnyyMAWPhTJN4fHNhm1Lq=;l{33qp2xZeAT9< zT6d{NvD)W>+Mph)9u!Ht0<5?He0iBbCF4`I0y;w_A4Fm0+NEy0I9L-?ky)1SmoklA zPM+pIWs9@&p1nStUgemi?$3356dGFJFqG|uZ7C<7yTea0dG=1H0rm)IqW`#Vsgr5A z@E&!wOTOW!|AcW>BPr%M#&+svU0HUdQ!ec@dHZGGv-_v6sByk84QqCMsw^tW>i#F= zkm`$O)pM106cdOX6Q1q-KIO&fquBTMzRyZP{(VBi5)UrkXUdsHS9l$R5I2Qkx2Otv zQ%?g7avT8;!$}9=CpjEgO$v(Zms7b56|m)X^9#tT32VqPvu1NZJ(}w$q$sW-he)yekWUtB*m*u8e>#(-zd>Rx%dSz%=x4hFY_wY z3cFfGrpM}~P4Rz@jV>If6o#}VXe57cTbFGxgW7 z3Uzw61nQq*4D~ojV9TVX?(%36l}a6K(bcC4KFnEs3X9I82b^lG^PW>}Cgp_CQSk}a z%a*sn#45J9jjCBP%d`@o z=1cxwf85yqz||n^IkuHJb7*>q|JWK#pCja#ncq{MSRh9H`4gYcbWRyI)t(KUX7e}^ zQnT{MkB3SCZgTa#EDqwD1@t*T9reN>j$Dmi_0lIxPw%CUs*8Ykc}2fR=Bcb-hn;zD zaWO|1v65}X5GlRzP-yOc$AOSFM<*}-Cv1;CojYunuggDcN8bb(ROn9CRusz_U*A_Y zY02}(yJp&y&bt|La&FG&XC)+ihvLlfxlf?_GJ9@N`6RFYYsl$F9jW*A?X?=m3>Dv~ zuH_L_@ZSYjftNmEVr)kV11Ehl3(0&bG1S=PeNH|u#;Y}nz|fPI{A!-uqhCSVk|*`| zda8VOONHWEUC(_N#*|-7&hk-)vBvWb%$;ORO0CR-=lY7%!89jV3ZDtzm2?b5gFP1X z0|BLp*v?#Zq*F|lXP*2MbzcU3j~d2k@8IZ|K|{5gf3mNH*t4G%5)u&kS}Suk_4E{< zYM$mP5>GQbhNeG<7}SvI1sc#qrH|;+hkSN9ItaSgAd24-BHG)i)R~}y8DPOVWsd-EG zE!B_F%jHT>UdCFl@9o^5C+S+SUH1|6@kO;|;m`Lp!Tx{A&MQeSYR+&VFRr@R{5$J-?T8qI<+j5|S-}%oe`Y(`#8ogug!@;g&A(;3ngrHI~(l zc3Pd`b7c)EG&hm~hBEb`RbD7#ql|QB|7r)_LhW4Bii&ObPpPpA=D;Fm=j_*RS8m;# zqW5p}9(%Vf?=3QOsS7)eHfQNi1j$^QfvNabF_}O3*e|Z#9N~K{xMq~47GwjbYQ^_U z_Wi;i|FC}8vx3YQJ)E?q)C>9DU>1GcX7834{Q`>Jo|zUcblFL>ppTe_U7ECCcwZv3 zC8T_U)O1kY5*-#?9>EpQ%Igh5qh{t8$(c(k11(3^EOuZ;pcWzTP_2 zGt5j4R>{rx@N-tei*+0Z{C_GSH&yKH-mw>|*2TWed+pZ-SuTCvHkcmC+Y;gnQ^hIa z?A~yMgI$Z8$(gTe^QoUg4+6=q3YGl@YC}a?B|x|SRIF$5pPBA5uk90ys42w~C4Dd) zu_l5}1gmNk6>1V{pe#yn?}rK|w=ohX8!|Svq38n4G^ z?xQF=v(5qKJC#w$lTDVRIX4-iGSxxi5gtbbr%$T1OL0N0##YI>aaSIDWFVbd70$B|Tvo(dZtNmMcNH9rQb zZ@!*IK*>}2pZUiv>BCkctI4rKNlptcH@DU}sTrxrI)X#k#upRbj~Dxb4CswoLj7qi zUd}S=iIfE`nb+prhWvo%fiX+R62`9?2%p{t4#yE!2|m zS6X;9>Pp_bTh;7{lxOF6UcEb#hrz&bM78jxV36Prw6MfWGzRYRi=CgEV0D@)D$dC% zDHok)Bn}3$>e9>DBoXn@fu0ZDEh@kgDELvA4zX(3YA6S+#!tpC3RZT|K;P^4ka@VE z*=t7P6cSzNK%YR!6pLlnvJ)#RC8jo9B041Ywo4b7Kdl;f^16CelonOD*d2V-M$=ZP zLZOgh?-}Xq*;ODHd3P~WsvnA&^qOLZ}6agZLZ^(KiaHr!7;W3labbrE}7O+~iLjg@k5FeQCp1r;Ekb;(b$ zuI_&lmrus6P%~FA6LNB(@n5_2l*|fBxh{}aBYlmDo zTG0z-Gq_ubL18-upX_0lde|~6{`A|#tIK;ZDAO`i)<7o`add%6Xh)HGm|Vf1r8Agu z|6o=DSQlEiMTJO&AMq3DU-B!O9cRP}B^}ndawR1_6%ZkPU*t6)WSvPvWe6=aa^nfIWluri9j~ynAT^22RH!`p|gr~~mJ;jbWn=>^rMGW9R z(JLv~f#2U=P>{4*_S7R5kh7jk5GyG885(t&`_L?8LH+AQ-!yJ;^t2LDhcYzM5Xsu& zu2hH;A|jtXM|dk;R>nN4y=*8OV4Eqfkfo&6LZPL1*hpU+$J};Plc|$UkmWd_bhUKO z8N5RC$F!Uo5~6feX!B2JR}1il#|DMCWkhnH>%AFWwslOcS89xT5Lr1DA9iG3^Asbq z9`W%Rz>OPNJ@&vj`~v;bX1+~^OBR$s7%06?cv!|4F7lAkM#lkk`HaIk{W)_nms|1( z1CVjy+M$_1xxKVG6DmWYC1KZJged-; zx;)Q-D1-+I6pZ|Q)`y0pw1Wal5gs!cd0iXRD?Y2*UWuwO|AQ)ZbX5;8VU~QorlD6F zxu#Vc6*TO&;o@G>9&bzrbs5l?+`uDtq8@)4oEc1LfJHOc`_%nDO-{I-;+^{^7Ncn< zA95#m!9XM6K@%hW>^NHoJhMWt!WzOjl~qM&ar|Jko=@WzRUt>C((nAiS~lWY$?xW} zZtJN@^NRmSw3fuAU=V!kzLB9nN$H|`VUSXDb9}nI zSW}&52}9Vz=B4KE?s8|SWxiO7&1;(aa!>~JAh1SzX)s?c;zdwwPr{j)$KQZju?hl? 
zC?zO@27yvK$tev*A)fO-*2LxP6a(TSQZrc?JeUDHrbKQgc~KSY)RE!XfW4JM_ApIH zH@&*1cXuSu2@-Ifb~+?W74dxz?jA7Y(libV3{tcMcw*?e2qXyFm3A1!amE|4NVkR5 zsU9-Tf@5E*iAOw>w^=)@4IWikahNBpuG1$~_CQKmadE0t3!zd=D?%#{^pkI;ZMuK_ z)zCPqupuDFqtYj@1(B=QQ`F72AP!{ zWsQ45`ShWfD%9(qJ^Ft`=}sLxpV3p@6-z*%O2n1dVI`1K6dVSsFKmV&Y^cbp^ipqn z@YU+k)w`4W$0czz^CT##?a1DCwWK+RJ#%S2&E#E)r>UBjBr?|7Db*m`OOex1D~rc%_pQtG7riiKLrf4!~2t9AcErZ!Ajd z8ZyToKS>WNtsq<0xVdiS3n?xB-hLU(%b@F`QSc*R7_1*8N>X=#dO%*g5JJBNsi3 ztZI;d!S~2UcpSpq@ihYXj*QRdb^)G`M3gQO??6k8_Pcp4-ijvGSq}TmCzG}^V7-^2 z*Dn>0ZIepbgr$g-6Q7pa7Ci7xv}jWXNf>k`?tFP}G9`SgnfRM;rXcFgOIXCDiEfzH zf*IZ8Js0FTLyZVxtIxGHwDw~hYs zbfG3w3q8Q%6%+AAPOF%eqzEQF>yUex!r0KFXE_~$_SfH;q)RDYtFcJc%loVgnKror zMKZ;@48hcx1J65hUj4#s2+^0EEfWLD(G6I3%t4#^dFYQPc3xMXJ5FrXwpo)O_g`Bg zZUFb9C6Y*DIu7f>Q|v*LLrkV-X6P9kFH9$89=U+KjJUt6&)GNM|LFqgjQTkXy?nNK zp~#$d&@;=P7I2pmnO5UWxHoiWeXJaANg%6|F3oRpg#Qu!xw(11v7S{(SkP*f#j+DH zBkl31Da3oj>kO~`FZw<+kyYT?%l)^?|I=sCuq}FTanqU^nr(hTx1>FOR!vz;X@_r? ztMGq15l-WTPRmc(Y2gM}mtMYRobxV;G{**TZYT}T_*tSCNeKF9(-#_9;>8LF^>YK* z2o-l&>I6-+mBr^lj4Yoi`uU6NYEs}XZ)M#g&gUC@QEnMQc$8rJ$@cpd+rSYVX%btY zmh8xAmMlsCr9D*A2j|KY3%gO|YY)hOQeet4T3viTtYDH!-=~EfJt?=5P z>fu*{-$+z~t5M!V!d9({GU@rKj6sBPXgt+oLn}d5FwMm1W+%%W**v(b(A#pERi3B2 zQcd+9_-XDj_|~mt+3V4ASuk3hy>rtN>WkC7JJ?)OU%^#tJI|n!vX;r$8VfOS!0~Lf zrIbs7!Ez#n&cyzkxYSo%_4wu++icT!yvd#vVz#cKr>5BvA=3tz8M_@K5v3hbfk6>b zjRNxHd)*b~cTQY!6}kU}5fGn%OJZgIiN~NaR>3Cj^GABQ*n1fg4|O_oP}^G&L6tAn zFw1$-siELq_Fo)_hx4#`tzOHTy;xPJ&P3Qk?7(TR>|m6M;ams_gEZG-uX)6_;>Kfg zECWSR%XfGq;|3&IiRY8AXGpbPYMoTY#Mp(J&WH=z!eshXeG|tXCsqTwGiYdmvnXqo zBkx)EK(nEyRhT3Ob&a4~L%;5wUf)ieYMY}KZERA*(!i%+lU9b-&0n{FZzd-I!-OL` zX6Wwa44c!hFX#p#a|6Y-3h#UN6T8C`MVz8WRU;~H=wU4y5j$mZNbjm|uqR<$>2o={ z>mjp&_lFMYjt`Zo1CE0G_az*{@c zBfLki-YfB8ev5?@VcDxC!m=!SK%OhpX~NZBF68TEAo5lRe*j4Q-j`lA#WmkIx+_}8 zws3_p`w0;1Z?oqOaQ`@2Vr|VHtvr$BpF_7DB(U+Y-En#^l=RBu=0m`^E7(~`lHy0$G zt5Gs)GYslc?D;8MxZw60Y~q@>BDUl(6dH1HTUz4k2jql_|7LDO>IWjuynX@fFl(gN z#!5F$?AE>@vz{B{riTJ7&%MvT0C!Kun%bKF?FhBEk`@G>Gc7lzIS6X22v_BM_WmA) z>@u%+Z9!?zd7;14()s{gHCcBNKrk!)CIz+k5Ee@`SW9^7sAX9y#!x1W(2YpT9X?Q6t31(pb2Z% zi}9hjycr!-WT^hD>vNvn$sjF&Ff5R z8J6=}&z@G$+4HeCF%F71jjtiCK#Ope`NHhlc)vFZyn{=3>ld)iHZFW1Zbg+}p5V2L zwz#s;^r*z+B@bt$meCQ#B(W6}LWkBuz<1f^a_lJ1FZ}`Uk-Er9{7oOx!miZ&j|Miw zNE_Z=`RH2qDohNrt0(+>+eF0VN#kXV%L_eWu1XxCHDQ!x7*Ofwz9^4wh2>4mp}5#w z25>gJ?=5p$oH2D(p@<*!i%Zs5_CFI~T9zjQokFKYNcVHRq`{8YYg*Wq@Yq!?(#iL@ zOpu*4XF3~Qpm7yK@}OioviO?;mru*H^lrf*PX`-O*KqP~cC!1v)uuYPxa-jU!JLU2 z!@5Q~7*$C3*Z$Gs!+ZFnw1&3>o|iv!m&@D7pzhF@;@FpdQBjvMg1OhPH9S+F$)Du( z`zp@6E8K8ODu~U3KfI(-341J-2|Qp%d@|Ie&)C*joF)((%dOO75YT9=+5V5ScmA-) zY;J>|+hVQ0*ZcWx>}UAf)T@~+kFYUFo#p5j=F0abH}yl{^ryYE`ri|&6;-k9&ytJ4;*t?{`Th6$Do8wjAq$1FGVqGe*W^wNldai$)qH+112f$5zfc;d8YtH+>p z-9L*TN<((sd~~_`b7y)KtL*>rE#^$($f0~@26~-n@mbeDyXA@$(Wi7Z%s>t7z!EBI zmQA$jz&vn`G#H-0Fa(w!%yt~FR^ znqqU0f8*ywwkA&I8E?k&a|Tb!1v3VS9q~tf9$U^$iqzBW8qP&8to#&KEY`&rt}`h- z;Sf`V=I{!*Z%pSJTj3?wvBkC3xhIyJf?sEU;!p^lat(}O$F=(Qj{VxOKj00u>&iR{ z>PdyC zx$1sm!y{bA8S$DF>08Yccp6!Yd`S;-Dd*O29-P<^Q39{cB@eDoZ6KNYv-mG-nkS^Z zpf01(z%q{V0$0D~$4?^Zv~!u#iSAMFw)6$ThN78HkCski?30cL2P@7PJoImKfUdeB zQ!pW z%(dA(62rUi3aaKu-NX(4lG-9owOMOaPHEX$iuU%5nd&O{=*%a-|K-)TqIAb&&dB)5 zlZ{$uV>hf)K#>*>zxd;h$}o+U&7{$nZPyvsO*wVID-x>nvnOd`97Ppxh{vYhT1)qLezSLhp zsxL?GzD1*ilqoA7wl;%hU7&Xy)ieAE#YFn2TeH=s%>Uv&YAEC*M+W?JrI3 zi1e37&qB>zAI8t|r?4n7uo7?z;6_7?%GLA)$?P1VZ0Hb5(j^wGD_`~-E`78}a(@+S z9x!~ttw-4$cL@8a3<>(SNK8nm7vh4DPiFwEbl2AR<+5R7v4HUp1lyfGb60*@d|cGqp^-A8ZNQdf8TI|&8`5$C_ z!kFoyQUOW(^kK=Zo3L={S(a8On$7B3gwI41-B2T9tEr=Vykt|@*56j(Bf4*j6etKl 
z3BWJAyiq4`q(QJ|oAy;%oO>@Livn&XctYTK?yGn}ec+>*!&_KQGRQ-&eCH?>+X#W{l4KjmP?Oa6OCp*?5f z%Nm<7u~sbXu|5SE2@QAV=Pl8&y*7v^v#|7U<+)pdHIp!MP8K{WG#B_2<~Cc|oH?%K zgXE<5RAO{$ya(>{3E#FzhHqL4Wx|?X!4qhklFq;AYXwBVZg4cwonuvHIKX`2jSLC5 z{mg-Q%iTSYbih9TN$UNE5}SIbhRX(?kFl@7Gp?$w`{pflyK}T>XXq;&qhYG!Q=dVe z59o`mD67&Fugr#n#*(2T+MmKZkGFEJxAvi}PyCjn=`?oEWf5pqb;g1H{+kT)?=hbl ze;LTJ@jThev|@pr9E6OafQ^ZLov^fLMOe)T0hz9$AY`=Z`#(q_Z<==17;p_`{ixgX z9f}JrJ#J5q0$e-G7HiLGvRNv1u4Gd_Y+Jr?4(6ZMmG}N_wq-|F6n@upW`R;qTM_Mn zdt6KhL6s%9HeNcXep@o5@Th6F0P%BsYXxH|%Qf=4Gvr-f97eHw6u9p)Mg=|CkxUHaaHp8$I z?*jci3?-T(=GDlZ!$u{-B9(!GLuZ;^@Lf%p8`Mf4%_;EKhGt!8GXFesrhZ5GfrMQ8dA z^w=k*wM|pd*O2unP&2gW_Gvw3hLY57!It28D>*8H1j5P^A!aGz*N`eTE#6I_pyz^- zzND$*YhuxE#%cC_ByWA`bGy8O?`04dsSwmR!P))`h;eA1~Z(HOU7^m0TMCypq%gk5=J*NwB ze@tZ-QEc%m&!i>(b-c}L$z71~E@uxdaB%}K*oLfLn$o->7yZl$AI5K;&9rqU?5SsaU{m9k{bPdN!UxZ}K5u&@NrFo&(QR^%hH*?06h=AUE zaeIt%34d;W&g}=8^ViMLM9rOEtatwz@B!t#{MSD7DTp_&OcsQcj~xPr87}4T8A+v* zcAA+k7;fD}jG(R+?rf`AUh!N#i(*0;ZD(#K*NwGXck7Dr^;fFXFF=szJLbqIB)kWF zTYhdK=hY)N^rf8N&IO< zvNLXcDcfj=vm6x$Y~5_?1Ng>lF>8l5i!v0_CS+t*-!(eN(NuI6dwD2sR zeOYs|XIr`m3s~s6(uDIPz11%4=DOaj5d0P%FGmzyzDNp8K~Ca@n#0iW)7m1}BOQ@| zEoVLPl}Uyll^DY3mMw)rB0#>hPWYSY$S0<P#0<}?Ocd*lk}}35@*~F8VoSoMazXLpGw=L4fxrQ4=E41=E%x*N#UNu1YC>y6xSc z#lV=Gsh^{*@9YRNM2O3^4M)ed>PT>Yd>*cd&vBBHL#(qJJJ9-VpMnQOS|L9fszE)$ z#X4yZLV`mn2v4%jp`)~oV(yyDFJqf}O{actuD)ow3E_RPq?uZK4{}!0Lvwu7!IlNb z&-x6jrv0qY(#0Tm+^3Fk3>v$g_NV1ylt+08pCb|h3bP2CX z6)$3`ol)(i^#-d>I*7gP#AjRxbZnGB^4fPYAMWev#>Up$=%v4_?N7;gJR^mjghtJl zyPn?(3kv`EU~*=?rGxHR4Yt{jw>`Q%MjHl37DnpikXi}su6n-NCw-FQEFJ97EK5s{ z6PRuH)ft>F8`?I|r?$n$Lv%5aN5ce0XJU@+V_X~X!?LV$c~}LV*WnpCM+sbdCYz(a zfI;Tah>@>c#2;N==Z08l8Z=?>XRmIsbgfrnIFFkck195s@r9Y2k2?Yy*6 z_8>*tOH9ZkI`xeHDc>bW=N(9ds7*}{j0Ar464~37YWby=ER|DhZfj;qu=#u34ufA2 z0BDi~lDcM1=tAyxhA%zQRm;t_W~LKBSbTg&AU~De?7C64K3#^|CMhjth1yxa@=WQ^ z(;UZknjHvl)crJw=;&JHFaJ_Im!2w5mmKISDl!+2(5m|=PAr8xY>@>V<^}a!T=S0h zrPE70IxZl5Ad0Fiy$Yv^J^@e6?a&V6(It-Z4w_n#4P{1w}^6JNgCqRN;-N6=fHy*xLN`g9c?G#M@&n)&6vw?)1||v${P_ z``lP>K2A|Yx@rmMxoKO(FEt8IqDBUX1|PJ#3=9|{X8-LwPq)@zuC}*OpI=Vm^93)Z z4Z#hq=RArL2A^~B$8KITYT`H9j-`@u)s~X+EBS%1-f7ZOadbQ zT^Ao7OfX?%MlPfr)DNlJUR*JI$&0TVC_3%9DzFiu$ZgQJITTB)U7H_w2daZM&^XvW zd%G+*FZW$f_pVR3&zOVhZ`LtNtCyOtyNL-n0XMeQ>u&173-B6E5kv90soENhKMh&j znzi>A#(v{1+pC4XlkDuvAMN^K;zVlE#8T^=D%$g9JVZ$Vj>Xt@@(b=^FX2E3H4Z!J zZ()S(05-1FTc&315yND+1QgE}!a)W#{<4cVmtD0j@1%Fr6-*(afaL2Wm?TkT35-Cm zh1Z)e(4{hFb~CCBDsu{7*f8mW#HaB`2hM4@KT(BN%aVhVQd7@fsbRUe5NA;7plcu5 zvDRdk!W){$Pfaxvy5YBllnF}$<9eSn+HS^X=BOB%(}wVnq2^-I1Ips3)+~CIqaO6J z-2GI#)~h$IUI_*?uZWRj@0m}eE?x4(=p&p)jao<0_KTK}B z_Gq#4+DP_Xp>pU*g!hyuy0mzu>q zarNp`Q-_I%T&uFecx7ry{^ zzZ=6Fpsnkz62w*JM(#CDamBwi)QW7Fo~s*S)m877rzgCcx)I8`5Kio`FTahpV6EOR;$BAlM`Rsq&o_Yb z+0xY}I#`u^q%}{`uFzfw|7kN}HPBUCZcz_XG760leC`)exF~i|UrH@v!%rS`5>sem zl;sdp0hxaZ<-Fio2#3udcn0Sb@8C;k>QL{I4SVP47yf0{qw-|f9=3)u_6|DMu}tG8 zCSSF{Jx{ur-r^edKfO}~4)BLP3-0vpxHnK5#?>0Y$42%HB7wY-TuLxlHJX2G^oq9- zp#oGgEP1tvTbFAu?#*BTsHn?9OLK5oT&##JdG$r&YItN|82}=dxfXcUF#;SlmC_^DzZmGkh7V3T&fXAQfB$mkJb|%aqcpy{U$X5 z?PQPbf4vW>)&gpMt{K%xe#Bc(+ZV5V5=B|yRJd+%<26<(0dnI8e>J9aLsEldmujpQ z=7!JakKAcygYvd_%Q{J2rX;ssWQ21#<^8n;L_ht$=l4$6<7oKdeQsX6X8u3FruyIa zYCW;-*rRrTQ&SU;o|bO6kv651zh~j{+n6W zo6F5pPpu(PDj+(kMi(YinKWLOgfeW9eqj7N1Yko10D%Gn0|SA8fC7Plf&c+PfPg{4 z0N_Z-D5yjbXvBylOz5O!%!K5OLKrM;>=dlR;=daJNFX2p5EyXsk7`7CH^tr5FW~*6 z{4TBu7K!pp9(#3zo1J2lmU*VWo>BEE0q6Rboz~S39w^YL%23Kif+la0r4=z9o*OEg zP^U&rOuSnsIp7d&%P4Q(qz*1)gj@Y2be2IaRz&A0ckKQLC)SR0E>4m`)ScijzNdcg%{kyx>yUo?tdM>FHiBR-&JYQzvDK(T z@sa{|;BiEmO8!r>LeXApnWX45H1IB)0W?5MtySs20K(0CmQ*wyv}Y7Cp0N2DsDfP_ 
ztYXC!%p$(b%%DW?4|#(E?LS6++@CfjB`&nLeW*!(RjZKhaz&$aHZSZs8}I5p)T@l? z*ag)Z632JpZ0dziJF{n5Zq`;Fq@(cJ_JaSXIW(X#h zVal6uL>^0UKrm@_6{l%;#JLYKKP1&7&i3_J6bQlTP7k^&-j2mj`26IS2i2X z;{Eu@J_qe;m`1vSXGrQS)FUn0$!TV->LF%;!aONtW^yh@ohfpcM7-ER5&jpzO=4?{ zE|cO-O~LgGc&Hdq8Fwh2NgR{qt#HyAs}=#}YZCb$coh9sO(_8QC92`f#Q8 z!?c!h@ZCZXv%*5au5hksrsN#+KXLK2;*Ri{>U@1rm}esA7*G|Rvle8yfE z53jIgy%eN=ny^-<7i1Nm81_!$SaQhYP~!*ZRlIy1zvsPRbOx;l zKF=~T`$b(#F{qqY*~YKYx#g&VE#5td`2<5R_LGLq=4z-q=tW`75@!C;fVt#Smx665 z5Qjm9m4Qu>kOgY0(p;@U^|AclcZGTmXGa%Q&+^fw)q~1B-eDL=KV;-=HmVje;ILm~ z34tB~i=_$x>4O|%`Z`JP>zq9T5-KSq<}R(`d0$#C^N!DUK3_Vp1*}w}_nk-Krw|UOQa+CEwoT>e^ zEl}R8K1Iv84;-w6=5VSd;KJoCmMkxh#7$8eW8u)Xsfj(Q!ut?Q*|UT!ale=pKqYhP za6p#AGSmtj;3F&gJL$|2)|9ekn4(g39~>}m6I;TwaWnH{xH{I7RCR&&OxzTN`~lxm zDOMbVFd>L?eqYjypU5nqQ5s96wgWQ4937H`gx^TaC8O~RP)W!k6f)Op$p@NPU2dvmvX6>ojhbb z%@2Fm^NcB!!nXr=)oa0oByT1-JXx%2j_XF7T6uk_8+e#`>hn@15X@P*kU2OZx8>wC zq(Xf4-?{#TYi&v^gXX>c0-Cd(rC9CN-tG$jNUG5j`~tjdrQ02yH_62m^QghoGU!oACaM~Zg3rpl zONU(fI2JLafG-jZ11pu!kNKCBLgf9KCqfhZ9W|uSbr~bxMv)W#=nxaviek5jVT>gt zh)+m&u#7`99)yoiN;{5DO7?rwJ^yp?2#M}{$(QqiqRUma2D?)yVeCJS%!A%K&up#K zJ|{udl0*<=={WzJn;(PyY$}@^n*yTGyTV*$3772~ph=dRAgc-I6&~r<2ScSyt=GLO z@?bE&@92-acERV|E-jRFukj!tV1B20!?Ll!cSnur(eH83&s2MyEAi;CbT?b=61J!h z&RIn+-qs0Tg;&1vgi*)#@=3~;zy`-&%ihOp8asQi(965OesPW^1E;YUf?dvw=^&yL zgPRzoFNEJxl3ISIRxKn^Z<1;wFU?6(M z3q4ACWUJ|EAhXeiAw8DNA27S;YplsW_71@tcFa$y@#d!QQoKc9y9z>;`=krD6jDhs z#*1nHTM~z~c#}W6@CPZLvOEE4#zSK(glFirSpn;y*;5GV0xTXB&o?ZM+Qeh|5oh6H zFxiwPj~rS&QfuVxFy|YmSUOk7tkJ|J>O9DJn&zVyGyDZ73Wn0IKcTH zc8@}oBJBHdT~ymjM6>x6Daxj=U)e#tLXbf7PxeqyC=9Y@`gd*Y4vIz9T~8%sh0GNe z|9PLR8?vZ>z&uq$n)va?W+n_{8+&R*CQ3oI3z1&wWdsAL#i{j<5izkXBag3jj@dqe zx6;??bI&ihLOsq_TM9N0Eg6ct=-RPl(T9}4&2(yYupyXlhRBnQu2G9md_b2U*TDI3 zFX-vDNfL2jE|gx!!0>#9BC_i-u*+n$T)f;|+F=8w?mrF0iZ}+)-9t=j}z= zn?#w?TblJI`%Yd~QGcsC(vUbTPFkGAic9HTQ+t+SBMXrHOhpAH-Uuz&KGm$YF{QwAVbT_|CmCZ+!d1!=K zUFtNch$dpYfA*K}xy6NkxPxP91$!D1rW;I(pgdm4t}^n-vhK{?Ttf^_OnMBbrY&S` zWHd!BDj^6pEXwaoyvTJ#4gQ22&vHOp>JbY1D`lE9?;|Ef3Uvd;zXI5tR(o4?)WKJz zRhF$kL0w=-iPqVvvC?Q0G!5}4$)Fb%6Wa{WQZagxR&)3)so}AT%3t@eGykMRB8BGq z!*h8Z@UT@LOjP(9{!{4Ss-j+;>0hi)z|fT-Iq1YzW9VUTrv>FQz%71}5j=;kkr@lh zX3@~*fe8Jh4=+?8u^f6#6ufI}L?{d`#LlLS)Qje9YM(QXfbu5O)MP@WK?Pe5C(7zkgub2;R!R5o*n-v5wJ-s?Y?q$tY)4JNGW#Cs^bW4I3c;O;tb?^rkjWjAE zx}>h!MM+&5xQKR1z1j;Ipu}*^^)-3jel$P~!v6@Wm=Spu zkQO;yBJ-r$Yo=D3zM!hCbHiA3?#PTDdz5?52X-MJh$Ca!T2wpo( zSfZ@My()#SISOKo)|d4C9H=H^AP3T@9rXL0vM8dz{{q(F3&k23w#HiOegOszK47V0 zv&9Ejo>W;n1*>x9EgEiv2FIZeZ!pz5BWoJWsav5 z;cd0{-k7TS>!t^>&u)@#VT)kC!VP5`HD+1aP%gcgw9v2CNU=`jlti0sPv;}OtfON! 
zjy_*jmbN4mqsHp_G5Il0@82B}D&K%d*Sm$01o>e>G3eGb_~eibC$I^#wk$2D7=8RD z;a|yJk>&W;Vkm{I1c56ee6mVTj|hbv zQ-=I&aKdW`_c{7^Ls9E{T7%C>J(CJ%KuuobB+0OHf_)RckakOB#!OvJ0@rJ=Edgql z6|*3EU0pa<3SE>QHJ8Xca?2(7*6`5qgs+T!=!m8)K%-C~{15e!u5@Mi8skaVp{09| zYsH)2&OFp~YXEKmnu{>Hd>=~M+|c+ROu|uO$j-@d%;g2STg^bQT>2c1t9on8erKLd zGEuuJ7eS{f{S!tg$`!c1z7f_LJEC4qj^V&HSvIs!f`A^4cl{=BtY*I2>Hw(6l9bRu zk5?z-y%(jHzNNgZ);8*2fI8NnE+VPmCaMRPzqM(;TaJMI$ePQ@ntO5%3MB7ez@RJx zqj^%kYu7ODBsZtFHp!(QYML%Ol}+4feW-c@v7i^wV-QqH<~uxAdN3e2IN5LQSi-n* zi&9BJXyo4z{kU)Qz_Yp1`AtF!Vn15su~>7$e&k4nc0~#wQ-@D#c{oIi6z?np#G{Eg zYi2T-Xo9Rbc5a|jQ4FbwX-E|Fhx#7?e7db;t~;h&Y>=#ziHv~QW6m!C9F14_a^35I zx>Tsq#x;9kv%f0gQsY~;`iZ>`C8Z(akTmdJ3$eN6lhwU7OVYFz=X}d!jn53$jZR0D zCJB33NBn>V<&ESk?iUbRzdu^*tx1%|ov?@B?iA~B(iYEyiS9-kRrZ!*DiVOMHhd(p zQKY_UsH?zK9Iw>DbUCTBtm^bN?jD`yph<)G=i_{|)k=3k!-B9I%?No zHL6ausN5-66s;tKm|=(Z=wdlVXQs4Ta;Nj=+ZwXtc`AYbg(@W$u6oJt>wD8luy_&1)1dy^c?XMeNJe-i17wx{Zv{gORc~+@XFO9`i&(NWxQKlw#saYnYveMot1j>c0 zJG`)eg)My^M(r7yj9 zN)o=zpAt!k>QzTEM)^4#Of#{Qfi0P{kG){L?c9G$jG z1yaec(MYnV#KPDOUW*h|f4U72`>(nuR+8!1xYxb1`LkLv03>`UQ+(9GB!ccWGDX7ySa_mZ{fozWG1gjU;T`c;KA0tQ(E7M-Ki0*jK^gZ|Vd- z{IdI={WfmgHo11_uI811MJ1{wSSl&EWLY?e^jO*s>h>YaE82Nl5Eh>=>*02P1pDY{ zFLO4wrN0Tj{3_H0+|OSZyMXCO4I5TlSM{1;BU&F1gIBIiG&x{Knq=5WuX(IHHf>Hl z+B%pPnjMQd{@p+>h*^;gzEiRE`tWc>zCG1;SV@%DxN@y;R7?HT#OyVVVfln{^dlt_JqR<->`z+VV=QwRrvl zj$f@2@bZ=vBUOb^Zv5q+#NF3hNeW)3KeU1@8Z&9o2km1!kT8u{NMD>iXB-{Z+r$*k z;ZWgGBOeOdY%7&Xt_2jWN3hTy*Xz!fI7m9;3Il|iZ7#;zKDvJa51%bzv9tc(`bwtS z3T0l*6)$q(XTEiQsregpCz1bv|Axe3(i^K$U{U3g9;1nCUkl33%B z#-%x0wJ{rSw1l!9>|deK;fcO!a`}eoWJE<%SuoPC!yWwd_;r3 zEcr*JSH7K{0=F8%Qx(n_7S_{8y!@O$ZFt#`@D;!AGG zzYn~sLb=yy=RCWLCBWuFLad7Q=RCW0l8&xXSY}aiLNNJ=Q`XgGxga~57#MHNu_SPU zXp*5ksaQCnC!q=G zo?m{`JZ0>#`!cy}U&4h>-jp1_;)P~Bxu1rzZ@l^bgN(Mb$ zdo*+0@B%zKmo)~@Q#bBtj2#2)Ia%d#klU)negSOv$;DtCG%UJOf8M@5 zrY-cW|6>*NIG9;2h_|_Njzrii9VUSjF{i7CLq7=tPc46F8)i1nuYp8Aq`^b@;R6@iORZmk?vE9Ud z4wInXdX>K#SzLbsd%al=(g!9FD6w@uo>xeyC~-U>I?`Y##+*`*dpKHqiim4Nvp#?%r_2{)j^1tZ3foR1-I8{S4d#xayI+SnR{Ru_Fe|UqIfXQ7h<2 z4D}A5##5KU(s~Ew@l<|W1h~y-3KItoWm8zI4)8=ynqrCN5(+Zd%qXq%Rlra70mHw6 zSOcg-mrEllpxt%xcB~vmt&}WT=vQ@B@Ofr6AF zrN9!V&Y$t7=Xo9)Yl}G7DaWKmEU#A8V~Mc*5ZL-LCalzm;gThwYVa1_j+fp2b+iaG z%@+rY(xGpNpdid1<3un-J^KnAEri`{Qv#T1YNMYGTCqIgdwk#;&Zy?GC8nbGL_6q2 z#2WIT$#g+zaD;k;xcNAWA@QI>GPpyiqzyI_66pv1QKY%S=L{ieO$s-@Bf{N_xE_ti z9mh<`=|{&pH|hJ1Ia=vG4$hm%e{#(h1xZTB0pICrrZxlOdrY)npz&k5-Hu>&3CJiU znxc%xOP>MNWGTvQQ?q0yDX*k&9Rmo^Gt4-dTlVmxSCQ(( z<)&4J>aG!FS}`@rXgVmWb7KTdx@12Q(8u03((6U>FGERF(b4L|v31`2vZU5|WOXOi+Dih8J^JW-k zfXAY1Y7^s4fTf#ZkJljUe7G39k7yU!TNZN5WGqRVa2F&3Xm64!lVaeJ$WfPD(HH1Q zzIQaQz8^es$Y#TdoP)x4Nks>TRsO}SbZzfhQ%5j(B)b(SS)&$?c}V}uzRz5HPk6qv^#Ax>YurkM}%61Rz? 
zHRNg+Ih3flE@q64JMURn^%g!8S{*Ry#+WG4-@H3c}yuni=_P1 z1Kp+4GvlX89*e0*D$JNA0`wYz@~h`Bm8rAFIbV<-=S>plU5D&lc!UjO`GOBUWe72k6m=#}*i&H{Sf*K{kWBmqY!QL0 zBBS@Jkz@z|XH}ke&-%i5sd`dZ3`5aj2KK}y8d~9o_dB|X z1hIR{f{oPoR$){8q$1}cYShQ&FW@T-*TfP@tcv&gp;g7;vW`SYwldKm{->f)u}a2} z*=h{UCXS_K@?!1b7hsiZj$YjOx&S)@ZBl2MRFj}j8*)=ZT+6psa*DQAl`Ql9I$`38 z6jakb<%mVkLirt9f$0-MSVY<%aDT4FJdJvxy%2tQ6&NPPZS2JSyT%X~rF5h?S(4Sr`A z5ALB#0=;}+2er;0E5#uh)Rw2b%oM^wLlM<+rsRyrp6M?1s_(hh!O2U(gOiE>5Fucl zQdx%fT3MOLkcQrZVL2vnTR)D%TghtTIYwCW!o`OD2mSukUVm6u{pf1#ACCVZgDhLa zKg+f^*0p}M->dMqKYUkf2Z0ia@w5FQ!uP2Hz0daPsT$VK}jk!zF zn15r*J1FX@tw;eXjqn`kR7BXJKEt$iFaCrE77w}e0Y*N7z`wmFXvo20?zD<@<^sh+PJ=`|MOMw zP^=UR0-bkPvE^xaDbI`q->goIAvrR>O@!2ue_ZEttf_c?JJkq?hO0#J0Rjxsk)w2v z+EBkQlt7+aU1BLbV813Ui*9g!7c4 z8}a0y{R>!trC2D&DYBrU`~`gIwfD)Rq50)K56`Qzd=OyB91@f}#w~XeFZ@_a#Fd@p z*}VD4rx(oPyUd%*I*GQ;54^>@7SKdgdx{jd6hTI-_q)|vZOthLyQ;VGr)F(eE=)IB|r=*!25RvrtkO3}J8+7Lw3`Q(wLFH2}b zynGttO>pfp4V_ceNIu_R+c<`B%7GT8PD;wxrhWNU%g=sJ40EpqIv@ZcO++sLFfq|4 zT*Jnyk~-CIt_M+$hRa{k?gTg2V*tm;D>nKYsr;qqiYynC$3K$w2_^SLduRFu(8=ql zdu*BPOKv6nY&&Y_umn5o@H+h9!csQMAR$#|x3zgzmeA(YBr9%doOpE%bAagea`s&t zt+M5K83{~0_<>^~RWa&Ofw)U}A_kpBFlBdgxP7 zV?2>oOp`_-+yvlDb0Uc@HjWIDLR=j1T_2Lf6^l~H`Xg9kcE7wCZuK|ES`6taPvjEQ z5S#Df4Hh#iNi*{%ikc-mr`(uEF)m2%@e7Bnx*OGEJWD0)E;}XMuEC22kU9;WjaeLE zCrjsOxcNN2pNCIb)hRyvd3Z|v&=bCo3lD~%kUSfc5W8?(i(HIEZ2MT>e}H`F5b+Ya zo5$DMd3Mo4cuM)8+7#P~n;W`}CYAbkR&^40?LYMH!la?rex)d<;F|Nvg5mUoNe|-` z0mte`%Fow%F@Qw{FCszfM_a2-{sOWi&>mrk6xXT-TD%T?4#Q?ZbD5a7jvzSFI($xU zq-2q)pa4WLoL=&3Bnjg?T)5?zEPGeDYC1AD;{?@Mw}BcGHfz`Rp(-yd6L*CN0Zg2; zUoCxnUgZ-;?EqoNa4}3^8i1lYHj3qv>FAJB!aK|z&DA$J%&U4UqG;*5KB?# z*v|a+*C<3Fj&gQlj*Jc|k$Kbuwad8T8)CqAVHFj2i@j`N4MB1X(k8)t_~KN!WUUko z+KM*qB+{!J+b^J!h7pp0RFBURvS*4Oqi#^qId-h(5ImhAM9C8u!!&|hoOcu%^50{p z4>N2(NV=G}S2yLWF4mp#Pqa~wBt^)!9~(y3INMlK_%DDsAxFJ#PuxY1YZllzIJ_Jm z+mQHw`y6E70m>P40`@~nP;rjX7`X$fkouMrhVGJd=fx_4SE^3IGS z`Nwg5L^&Q;TUI@xVtK%btA3z-Y@xf|V;9zbR`wS#xxPMak4e6lgq1YC-e8dFNzCcc zXyT@j?p}{fXE#96`l771Gi;+?W&0r`^Yl6XOyB7X#GZ_I1@blLX{s4yk*TK#c|~ z$ewe=xTrb9(qv;_x+RaP;f8#0lB%qpUQxtm6ASyuGXA0}HFJZ7WWBXElFTNmGLOwB z$4tZQ5*Vj={0r5mWArO$6{(v`nJnizlJiwRQ84g1VbcbG`5tde80c`WXoT zfbGYT(~T+HKp7nZdTp$IZ6e*|F80cV^x~b8w}bMlR!_C!RF0S+b3!RcXa@-PtokZ_M=#9i#ZV#g?F%|E~QEg$Fm@L=bh0kQ8D@svZSIxbaN zYqp7_RUCSpWYk%MSQK$%{G?SNPo2&hj1d!dQHtwy6`Qgv(XbCgpGJYMM>{5N z{PgdmBD?+aegS&&k*72(uo74&;YVWj@Lqe~D6GCYnOm)Luxl6PS58Jv`23Dnhp0Vj z|AI)!RXF~EaGJblG0tU6`6DHEc8hpitSIL`48oBzb=$T{VL?TnptqZ#USRZljb110 zzuj%YV7l$a4WtUwR@xy!>a8~``Az_}bQBRVjEeXK*B(1*xs!+Kl&m2Tu=_Tuf+p=u zw!3z(x`M3EYN)x!d?sAcD9IpqG6`){o%kz_=%l!;hoo@fuOL+NaWhTd#(~73_XeCI z{^anMD3skdsxP*W3j5NU(>zW5S&>gLVV_*4?a#BNjFh~Hn)mc_>ZXVEsrp-$^5kUB zk6yV4tjZB#N0Z8uf>P|{0L^W*BX8n16fK;ramgh+X!FwwV;yYz84jIz#8GUuAsx!mT7Y`@mdbJ`vgfNdv(vTx{5 z`%sh(;hglNEz2xVV{EQYJQjP%zN-oX-8ocx#AjXpg1t9qRv5dt!2KUej*6+fHc_c3 z?p1N5l9Fc>H?_YFm|u)*C~NT+Edj~AJ76>c*}a4CnQ)&{qzzlKs>aM(;33s&N!LD- z#Vf33w)e3K7B#6=pN`$D$*WXR7|)X_NTch`)l?_3PV2@a*Iw5z7zkbvGYFP$Gt94I zzG)zrptaWt`UR}7vhB|;V}+;xhlye~p`dWrU5I&XDPbTNq!pXA<5>oqpz*=t6YxxG zM2P&`!UcBg>xjjo-Gu`LbCMjYjY$g79R9=Pb<7F-Mq{*!h?-~L6J$M_m17hnkn~Q3 zzEg2n4WTO!p`r&SX+y3fLu^Sf#({;MTg0$#pSV%grR(HC7zVDvTAkYDx>qaz(Kuwb zVOJmJI1(;pY7fa=GACRe&Vw(T^?uzWQUADr7|k#Ra!XY>Yg|2K%NdnM(0Wqf>S(L4 z03XXMr)myNF1%$^a@OwCr0AOtgM?k$mY|Ff8qi3!B&g+aLILEtK;OR!C(j)uzkn5^ zI+LXSBAzZ49abSRsrvL2&AYJGvT9?S6>(et2;y-G&iNA$k_Z2nr$p+ zB+`FY=BBb$(0tSdB%*Ug%v_4hCWZ=;=tbxPM;~u`>3VLZw5J@%%5q7f!*Z!*Q-e;y z(+pa2i6i)J=#(KkHy{&y#0+_9OPS9;9jat~jI5~5LUYKYCy63vhenAUx@hkC}p6D^_Nx5iy1n42Kx^ z!y1rDs>7==e8|X*UqB*-nS9*4&7Cg~?1!WtDM{yP8c(g;kdwK~EVW@7I1fUPO3P#u 
zx9DXQ7f^&H*$7w8x?X$#+g-zm8J-LVs9kxYhN{nsyUD)k|3%VUMzx_eZNr?Fws?y> z6cRi@afiFOQ`|kc2QPMTmjnn7#odAiEl$wl?pCxwasAHoe!pkdp0)Oxx#Z7Gs)3?B zSi9Htzhgv)2+Yl!Mzc^j&q1ckUo5&p%S_p zk59Ix9Q)U(>JK-psaS{aLs&Nzi?eVZ+e`?q`Z%q1gO)0tP#}i5dnqd;BvIPZx+P`p zbABbyVP^vyKNC~mE@{_f)9?a7;ArMzL&pU9(LG13+zME=Y@O?Igi%elGvEfEJY=SP z8;0UE8EF;Z`!eK+u4DsKRFD%)DoC|D9^0+t_-9+mJxiD#`hhR;=N0!bB|d1EGohrD zb#uhq!~KP&t#coGOyN6=2k^nM;4|3yak)Gi7q0;!E0AXF>QY?EprLDYPInD0ype|J z4W<|Bd22v=lWu0-eu@}!fY(~|ry4!1{4?TM=(Ehz9#dMl>$CicMiErTwEv}mbxDS_ zG`!7{QuHi~o*~&_ksv)h@xD%aT>E#^j>)3q#pOI>qMf@IrS>^{jg{}3JgTi-{^aGUKbPGJ~CdcD;}I^_&1GSWuKR_M1=U~P-pVY zYKQM2o|lD3K6@&Cf_%aV?erfJLt0D6uI=(UI==;ogXo&$K|1Ie0^vg3ZKslb4Tr3_9F}yxT{eG8KCoZoe^ophi?xL%> ztdSY6ymz=Xuyqe4=cCx3)5xwTH)I*5|A+pKpa11kpeSU!Y?v>hEP4CBju($GL+6}t zsu&VvA@ae|x)~=D%5E0$Q=_S?FjZQ%TP6Btze^GzHVLR<1}yH5gVl9#|KjI)VpF=? zoxDfKwfxlwrRvTP44b5AAKsl=qtYW0_)&B!9z3^rqKl58bqd zK)?~IbGMg64C@zeidY)|P)k8+(R`xpISjZX(VV2v;2xAzsq)(}I)7Zxi%SjWilsLC z?2G>e=uXYs?c@LZ70o6^C^1m+66w@*oybiLSV=XnqxAA&d%tk)GQ8Z~V(`)Iin)NQ z6oSp5x4)3Vbc(+=dbav)Csjmj@p@7qk!HWq-O5u<(qiaX^bO{((CcE=uuFltN2BGW z5MX%^5GGHdKJHKymAdyog-1vC+(AW9!_r7WgQbmk49~am2$FPFEcJoE6|1E z@ACd#vhL*=qAP3!7QAZ!-_5yvnBKp>q?Ba12}dtx*K6;nxd<*AB;3(X;;>TkKU6~| z8^^;Qc}~qFdxO1!|BGzIsYGHj{2Stv8s|opv}+SpJ^9bO)I<>ZC%oER&(HdGOn>ZW ze5(}RQmo3WHRC^RQO9%>_-gpxQN5y}vOXqVKOB#~xAbxTknWjSVOHs-hO(EhF8Z~A zvb23gYh7B_6Lz$@m&o@_eDN{@i&ZqnmR8APb^8!TF~t4z{u$>~LLK(Md71Bbe&-j2 z+qCTZ;J?z%o1ZpTC7YIt<~4Me88%sB_NVyda0Y=7;mK&V}lz;9)yp%LX|US$-UlN?d= zRam-@$_96EioQ9s;DR%pK%Kna$?)o7MI~WO4z_XCv?jowcl0jZ*;d)l zsok&C^K9e#&B4f;Yr)(#b&ypat5-k%(2}=%9rWnr7ZIs@(BXmQD_R0X%HPMkqNtEw zu8qEkH9EntC~Y_Ib8o=F$dIL%4z5m0t|Ik6WBS@2rR~VZ1(jMf0{RBU9}FTkBUw95 zQK}C-3y-)Xj=EABn6jSH0F_*y%xAqK?aQeTQ9Cs*TDdRNlz*dIb+6@Nxe3cLyTN7MPn%%Yvqx=SB>#$AC%gCIod%Zb z+mox>Qtfr>+*dS}G;_adO$xE9;V7X;GF*!3CG9q2P}i*?S^KHZ<#X0Ap^I`SkSvsH z&x>=y4Q7<6P`%ZFJ?1y$S-7xYc(SQzek19~CH5Tgy~b(x6^&YsrEuPZ$*0uloW;*! 
zE`?xiMP?>9p&P+u7<2iGHiZYvg&*|2u^TSzlyLfAy*`tcxo%Hn#hs%+?4Pf<=GtHvJv*&r?QJH@C#n*+n-v?oivH)kq{t zMP}*j)M{v1fNUVzsiL3GBkJ1aef8BJ_^J0_@S8`Cj2B&-#b~^~Bc^(c1zGniT=`=r z>rQVGnm#VJn^S0RyV12D)c2VbCl@DWUB~C1DwfaRvtEmc=MHyC-AWJPy5vvzzcg>^ zItv5wW~0#&I|BBbj~%^+b>bnJsMO?r6gZ=v1D(|dstu=<7QxpakI*0P(Q;r(e9klM(QjW3>!gcn*_2NNi{_4s4SFDn2O9W{@>bb9#QqD%p5bN4BPsW>U6j z%ht7`R%3rt+g!WAmZe+Sn^yGOAPzT^Li78=BZIP{ainjNbvyTT0)utSkkAlWUb!Gd z!M;TPa?4uYvLj4a+;M|a`)dMk`CGI1GJIhJqRFBYH8RQa0-_o~vJ!t--?ha=y?vE> zEG5|Xgl5Q+mf8}~y;@9s(wsWE!N5aknX=GzDYY1M{Vbl*^pAjL`lI4=Kr}#kda55; z#o2%mF9uHLG8pR^cDGdiXO_assEs})4_M+7;Fdun7BU{S!_Mvb5bxnS(1$ZhjIIML zC83#$4?C=cU2M?wRSFA{OMk1O&=T#Im&gTh8{ft(?^nHNd1(SFTREqsHH6N`rEA{w z4o5?2UGaxX_%qLg6$9O`({Eaoy0amyCpzju6&|O9_KMBNE5v0P3yE{6aF_o5D{_Y) z?0Sbgqh>--Rx1@{auU8EM<*~50?|=zB^LkgODzyp_QUQv2F-9ucD}Q+U9rp)0d%;& z=3;M3M`@O;f3ObdXPVbPqxj8-be?0g|7VQHS?;O+rA=j!G9R+ktK-5ZlFm~T`leKrsG(9=9?P`+%mODSQT$2l=$(Mx6R`f z4S*1P*JzKRczj$eH#OlLkhZcWP-C6jlD=}66y2$i#{$^T|tla**c;?IS?RlW7cHZ*N5eBvQ+YQd}OK~hKZQY9Ppc=Oe#<9kS{Z0=heD!N% zQ=_V+71u@H6%iS>Gl8^^@06FY8B2hFaGgZNVwUO2n{^S-C_nN{DRup3UTX8P3?&)7 z|2fxG2ktaPXe=z5DBO;0-x>V9{9Szsg{}olTgcrF(Q{BTQ-sg_X5K4qJ{4GNeu({) ztS5Ns5RDkPPzWg8I`$9(1uhmTDe;=GiOb@^)ghvG4Q_LgL7kx{K-XdrcHvl{IIv!1CY@05 z2G}#iqkzS>TkbGx$BD+r_JTflKRl(C{*%=b-~~>1*<{SaP0Vq;V)tl4a&TU{)l_RV z&tGCVf>#$U-@4#QA069Gqo@5_8?_R}WzmlarS}6>shB)c%v}L(s!h7n8Fw z9fiu&n@(NDl*RL_EZ!RSlcIpMS>a8|40FK3wCR-knDRxzwuO!MNML+D+F+9cl?!A@ zCK^H-&`a;FACiO!C5ue@1g(mu8i117|A--dEYRhy89(*>ALdR479ae}^H*kIp}C35 zAC|If*KVs~30D5yps@Rq0xsC{KCBUHAV);Ibdod+|Fm7uw%;aI+#ewWuOn7c*JnuH z?v)&@se|D^Jg;->Sy^9b(A-7!?+;q{(qpYBKHm#A5kwcADW{HF{%b5<`IxL!Oa zk&I~M$j=j=ZLHp2={@bq1B$sq|;h(lj@<>Btm(Br_Tm^IqD zpeXEFB0y+0Y-VC{qv5G-!DP)><&MFCjXM`mf=uAR_oKs1xdtOq+ zf^OXi62EZmcE>8%qB_CJfV%$>+?&bflUq%S+o|Ebbz z=${WU)TXsyEM1MD>+&PT1xY!sytkOJbo={$A141OE-CPlh1jU#Chc;Plr_x0Tw(4}P%Hu)CJM*7fJ5>mclA;DwcntQAZDb)+y)b(+Q-Yq# z=U!a{)Z5s4V40Kn5@e86cX^2bJq#I+)11Ar%9^XejYa9xBd^`vvEp zmDCA5C5Qx>GPx_a;K)$pw8O5zAmG#WR@Ob&$ZzotbdR*rZ#b$T7#X^I0E*u^`UFRr zm-~zPznu7y8gzdWhfxj8TbNquv@2E6}GH2Gsk(U0lR z5c4)p3F%^ds(avVYtl+t0emE#FCK5*8BpJ(r z6T(%H$ntLu+&qam)Gc4HHMd{fP{Cq8!h+);gBS?Y(SlnF2%IG?f$Oc-AoKg^6-@#+ zQ|I6Q3}`8vf=?AyOv#r9Co<;0hh}n!vaDG*b1L<6$4`v0wQ_F9)ka#$Mglyh&6a>k zU5oY?K8*~#UJY6ME=l2c9#K1SU|P8Ow1SUvK?X@9f>1*XyZ({}A|{_TcrHlGnBDAJszKC>2<5NygQ0w5g>&!Pj8 zW*5}Oo7{Hb4mam!(<{3rx%8ucaTD`e-_&Yu*+VP6k zn``-baa4ZXy|`dOS2 zqvlWFtp2IP3y~XY1?j!lVUaka-|0@A(iNI;)4i76_P2ON>#5xl?bxE75D|LTe8Ff< z>}O=*8BgHwbu-Pmn0Urtwei;tT0DL5>JRij5V^=zKFIcROHg|x0s3cGJ9-P-1__n{ zLADpF{1wsQ?sjPFgx(D&BTiZp-?Z$*eZ81^6NPRY>DHICYeO6ALV!M3*%dZp{5yOJ zE8M5>T)Atb<2!kP!L))6bJh2`=esdFWnM3ZEz~C9a^Q65||g7og|*viLC!_sZq$7 zQ+^|QyGHfi37)3U#NHUSMbuhJ6&{4b9`T;Ie;LUeX?epCNXAJhS~i7aZ}dqGgjR_C z(_UY_exsQYmpXa{j#-1Icb*YR8`F3yxjbTCad(dW$DVAW(+&-#@;xpO7y5^hF5=Fy}sTZ4Q?`F?PfWNxkrH4<^J@S(6|B>>EFA(#-sf48-`VbZr z@!laL_Xyg|oxxymM&zia_)yGDYqa0cmiXz}l|+@}JAHkk75=#JjT~KDA+YRX=;%5< zW?IoN#R&QNe}6Eh)|muFiD@T<1hu}`>zfr$B5#Kr-^dBrJjGk$O*5JBRM2CtvS|(n z?3M9WhCW74(lt-Z}g+ z`1GaNAq`<8SW&GWxG(E2_c>Hr5k>RQKSok_b3EB{HjW3`ussLBkV}G$L;cix!bj(+ z{)PX#dT@FudRfax!5gCDKVd*GE}Vh4p>Y;1B6t7( zhE-#nI>wR4e@IA=Gs~|1iKGZks=>=%`a%7a>Zy&>>r=4EhA?*2pNJs=c(;x||Bvf@ ziG5F1ZOv8)pXU_~`y%vC)we%rLVzN@12cjX>kn$jWjMJ_N?T3^{%N44mAsCh{&VKb zD973uN)wwmeTuE&7yWjyuUQnI+43I9obCOLB-Vn@z}5k=8>!GUCcB0KG&T{?YEC+= z;&G~oWxe>LOozphjf}LbeS}mN`e~fB_hsH|a%(WX66%CSnrvZ?F#Wg6L3?-!o7kVF zKa`%A+w^4CfIwO#gL?mK_04<1bxhW4Ih>fH64GiOkG!qa8hMJ%#aZ3aPdZ*Z9J$(e z(TE?{1jvYKjEiK+CXaS92pbr=x@FaL-YtC~v?JJW7m_OBO>TvX&F>G0|;H&;@xhiR_+-NgM~-z(Z5 zwCdf|=_stsP?cm0TGf`r5&E8bv!XT+lSAAtArW3{5^U2sr6t87w>(jy&Y>r>RP71M 
z)nh@{ck6)r}XH+c>ao ze@_CV^Jk9MEXC?YkabgBQ}~}WK5SUd|AWvqm>(|Bjh_6y5q3bDK-pVrnwxH#_Hk>D z;}B-)PX4|kjG{8<p&p#Bbem6^My~9Z^fa)jLjIecZ}T-fQS=#I)0e;h{GRiZOn>L^#9!6vWf*6hMH%L@1Tkc?KXw7kSwJ4KWoN z^ZWGjGIRW|xlV|m*8dtD=5$A9K68?*vG!)l&Hk>?dKO_V_GipvNbGQHNHJR*+f~gK z9}Ydj_yrVe`UT8qu&d5{W1VdEsqT6dBW30@j68D)G7&eay#5tAo@&qu`>_$Li^1!9 zBP=&C9PfHrVJDy9&3SMknoCSdN-qW4nQ`c=iv6s}TZ6dBvuW`VC=LrB@nt!lLn z=8*gqK@PU;mygK&_8pNv^k%#pxqF;_k#}dAN~%KNLD@7UFB^A^shz5RG?;kr8BH0M zd07fY?Di4=@NOz%<5{Tr(*XFJkjH?|4G*lES^08I^9$lu!@$Auxk{8@2 zsEhB*AdBY1XZV?D0T(_`qr#ic(c+Ja(m#43mG-+mv+}lfB2qqzIY6`%T*OT4_ zRkK=YOcfU^!snUqm`;%R+)@&l#3M$H`59uum9FnR+`rbF!#bz|#1!gXMX;p+{1Bp+ z7;VR4r-;6a%GB6iKKUg3hzrDG=3OL*Ne@HU7dBD^tEcvstJ9WlMPXs7>6KRs&;!K} zD}mQYP51UcWA!ownD@dD){!KQdVcPK`k%~N3!@ESRdjOsKc$>?H12*~N|%eJj+ z4)>l7FT@eh_I**KxnRF-3DX|)cN`T=1XC=&2PI<|<6;ZWiU#jG>@$A8>v;zzK(JZ# zeT9Ta?oR4if-k=JFFF|7(P&vk)4+@V_pLGtep4OJo^V3_uh@#&ZY;y5+gKBW<_9Cq zsO4VS!N{LQF(_q9|2JAcI}f4Z8=|e#NNsg>gFhom-!i1%;i?!83Xj~rqRpYRq835u zQvATiM2>*4skVq^<+@W|2u>3yoWm`6`-^4(4{g{$!GIdZ`$8&_PV69*Eo81Z)=SngeTk^v2=J@r;CQWik zo1EiK`adQn>^V(TyLDAfwBXJU_fWn+j;15Z{Jt_Q)!Vu>)&dRhnb_t(TE!dGm||N* zMMd4bqKVZL`+N$t?G^Z6C86;DepA}IOYK%=FF~if3(su#^2kt`C$`fDZfi%O2!9GS zc}oi}1b&T~>J;S>oB9QD$Ch4I&_#$v4Q_d_Cq+~a2F*m@i{1?Lh!P1OBPlA>g7vg$ zSAB=TC?}y=)L03C^4B;vZ19&`wqG8VC*(&>RnCUXEB1cE#HJ5^Lk!*N=ftl2pMDb_ z2kW++uGJ?hk?%L32lh7$)nYj!l)$21KJ!u)IH-Rc7IHMR!rRkHO3AO54KiT^5|l~4 zX9aIjaRdkt#{cXjpGOg8)qBoM`j|Mq^VG?$_e&q$jK3%@m~}JJyG6ZlXR6s=6r8WT z$)uk~nqiP|lGB?EziB(Egx~D4UUwreOLC=Zk z*{oPutU50=y&HXDP(HV^QxNNy(Y7PvSlaWREcHC5Kk!>L759>?We@!!OK5I@GABqc zJG#TqT8bZ!8fMVAc4-(1OJD@7GEID_=81E3*1@_fk>goRWaW)|>1lw~8Cn50%<$2% zu^Xh?^PAp1i_hgpSgZW&gZJP@xlmYlZ?~Ek^mdASghM*zfS1EO+x{xS>}{5{w6u)3 zBX?t&Y>8fUM@68qV1JN@?taA{hqUeB%I~UViY~oTy<@a|9ahvF z6Z0vA?Fm+)VRs|9dt-N-&E1WdVTF5)1D7&d!=bvEp(d(9#u(gpVvR%&luK_7MKY{N z$|1Rx)iR4=x|t7>?)iNe4lvtE9?Q@xh&eXrA$ru3i>^a$N}{faLJJ+Wo-WIjdT3rV zcR+{XP{t?pY*$PJP7!}Yd4Ec*3JclecpGe^TSi_do$yue?|~rF(^Q4M&iLbz883u` zdoW1jEl0M7As*-QrNcwB|B!%P6i3LC4KEVQU^tGDkT z;^K;rHVb81@v25091Sc08s14O7*!W$xZWiCti!Pt&w8u25_#&5bmu>G@}ZwYL4Jpc zp6C!2cV9~Z*~&BPUr$|L)MMD=N3X3HY&12~hCc71nwqC2DVd z6K%MokLx2RWTfEs@3JswaoYnHk7nYG&B9QSp|@&N+)VsVdoAsUcKQUkbgG<&EP2>a z0=-m&J4O!hgPZ9?B(b(0^T^B>nJAyX??eAp`<%E?{G-*bTZr;q&<#ZmqM6$w_%nhb zh7U|n-lFu2IEj*VLbN<)d68aSE+$wpr0mi!O7>Xta`IxkflAy>W{~m#gM-A4NMU9y z%%Wf(si*jXz8?CBZHcz%qs6U?G1t&XjD1l1!Kb&|Y!7H>MkGAQZFE#NJSYf0M9vS3 z^LEXC=uFJ6U!O9buZ@7lUD2%Xd2=W+zM_G@HEsDBPAgk?s9oM|PVB*Xgtx}m|K}K9 zzaSGQ2>qM=;(pX0MD*+t#rC}$exi0LYjde$uThy`U7*$fR}GNKcw%Muhbv#V&h}sn zf1R<;s4q)b1re?mEUk7v(ST(kU7le6nRBZB34-bHg4#eb=|`6V3M|PRW*^ zC;{u_UiE4GN?b$n__@bm?2d;B$cG^t67?JswjKh9G9$!Ii}coH#6nCP;lB(tTuW@1 zadMR*UIh%vsy=mAq{D!QA$Nu<>j3JK=b~4%h>h9?aPb#hL(Xr0(O;2yLp85x4HW<+ z^!Q&e)I{K)LHqF1p!AIijNx?FdP;1JQ27H})k}J!LAj`Z_mb^@-Pumo@QKh6y$fA< zABbcdRQBg&VZ$5Np~MyysQ)mQ{7+mDbJ_b{$3U6xof2Y^AwZ_`Js&@fHxw7PVmDlhTuagqGiS50J$hZ;R4PlY`Vh9vbLRl zy@er1W{Gc%0NWq`fEBASrhsYl#AM39+d+{L(TkOFJX-k0zMlvTE@V0@>VdJWw{L*W zt=y_x$V-}i5{q&J*+}tSrtOs`Ma0aHUjc6x_fdoXE7F`*W@0wtO06~pTaC8_YzHzT zRs+X5JSBXq=3uV4TLUW%fCgi+gBzA}hBwBiIa6-DN@t>`V0s=SNu}n7=%aKus@ry* zCa_0{`X}fAA^wpU{8A*J67QL$d3MUo0qFW^Fm4nR6{_X_eY+IT9m2_u8ocOrpDM@# z{EPY#Lvb%0Ga9S^BI}fprTMRI~@-%j_$-Tlr_789RViGZdmSLQ!1 z9@{xE&?7CA?P~cz2JvpquDnZ?hi=(6 zbtei&$>R5BVm25ZR@4=pA6`30LNa<&lNN8WbA!w9(IH8bBb~D5XWY7Ix=bhL zz)>ltqGN9FrsCW@*MEol7tC881G|AXqZz@h1p$iiFoDCyykf>ww$Z@1uW0k$6D2ur zPGVr?eA%DHWu_dJ+Mp6(>{oFos}y%*Q~kk2`ZFbFXD>{4V*$0yqNN*dq*oS8j(||F zJbjLH7w2Ari_joH}VH(!QY zzaL}Dpx)W$jcR>&;17?(CyqKAEsyUxF(+8JkyHq}y2Y<-7}34m 
zwJ*7LRe$PEN)@trw_1ViXxqXJVLokV4dSi&vz1u{y_nLJ-Y822Z~Hs=BkG$Sw~|Vk zy3)PILlU@RSXI&@rq_hv+5`KEm>e1a2BM}#u_AV`@^+Jclh!w3v+Nh*&>Gd zug*YYTPMTjS!hX{A!oHRvwMYA#Yy|s2GvH{7Ev1{o?Sfix8xs$F?I3cEekcj$YEg) zKIw;%!qo24x~O0N5(Q8obSjkNf59>&?!e*Gb;qs5HTc%QlWE8Ey2^WVYTFhlDnl;4 z0{Fg$=t95UR^hZP(i8}(vKIQt>S3*tF4TdQ^@^5wAT;2`cfTu*l9IBJlLd;@hkV=! z`KNNVlT&-67l25GvY0Ewv2nBE%_M)>98MY6dg2-!Vo8k3 z_Go@f<$@2ZR`{9uZy^Jg9MZVHx673WPU7veN876=!owGTsIYE=0BUKBtc)1hW|L6x zk8E#HHs+q3q_9!AeTBfFd#mjs#>B3FvEW4Yc^iB7(3TCcm9GMwf5ex9VJO;uqc9fL zM_%EIMC{4R&gpHQpLq8h#7k>#v@s_RFD5dK5=N;6B_$~>F!hA^&%7F$f^*XjRk(_V z)>X2Yb)jZWL=X_aa$uhOvU?o4XKG>x)D_0!$f&7Yf-d`)LITe$yQ(K`IMG}16>Tfw z$))iAVoI=Ac2bj?Fz))rvm9JIrEA;BKll(LcIu~sm1mV>c^06s#_xOK z@_EGf>{jW1s@ltv-@3-KT=$C7jk32pmn$l!i=Q_LF!74^MnIXAfc8`3yYb?pDYdSQ z=ajxKTw|8;y*Q2;rzr1XMD|}zez|8rrTi-zgMsNoQlDt0{l)ij{MLRB--gjaUpJ+w z$pNXm>j@DiE2BogoU6yfX`yD%sic+oe`#$TnMVS=bzI8!1%}?##bkb%A!yYn@EF;D zmb1iJ!#J(_IIA9%LlkNFTRUeu>hsYgE!oD9$W9u?)#&?s`UF}}PaCUbLcDCtyv(F& z9jIj`K@CUv)I(E5BNqis5f3A{%|fh&?Ha_2nji5tD-orFO(>5f-sHz^(c~ z)Sv5ckhOyI@pH~xZ<0VmPi}j%s8}6q;qdgYH2ztQs;cG;C!IA`R!uHbD=a1lKS4E* zM?r$^#8cD_?M`|ls&Wted?^hoTrr6)OXH>I z^%7xUkW~BQOdifT8xsj?cN*Mej|-%I;;qEi`$l6m+hVji$4qyTiNmZ zzFrPkfNJFSUn%bpbGePiad54h%aZnx52~tCvIiPv@BN3Ek~TY;E@yTZBj_rhlL!`U zFw7eaNmbI0zNC0BPyQ$L)~wq2|6JqMpF7oabx#W3RvZYeY=zywMapvr5mwyDs8To+ zh$J<44&!F-QNJIVaD7(sm>OqoU+|3ITt@jb0wViX7=L{>o|96GeuG1xFO$P}N!U4T4qKxPX*?J#@P zMpR9{RmrgpB!PO_-NueHb5;d;6N06G_9HwbPessuudf=iur6;@NP76rYAP37C_gVP zJGP)9tCa1{m6>F>I)nZ`@Kuq}iCa)cijKxSX>2C$Wbt8r+ z*+#N0?S~X_-IJi^@3sV1))f>3k>Vp?DbiwbrmH^_;j6uHz@}KRuouOv-F-zI7i_uQ<`pZh)STgFP3f)@D71{k_(rE27%0Z<*h=@{uleda%PEQ_L zp03T`ElhZH^oyI^k!RE~gI#w{g|m}gFMVsfbcmMyXx4oRdIF~lbAQh5k{pFn2c&xu ztS}dHzX2lYG(JDME%l9%RZ%Ts4KDD8hNy0d`IpkMSoL{&28Er}+wB3PnTsmZU(q1C z(nhXs8!=zLIs@Hy%wuth$>xhbamV~ZtFI5nw0e)==7Z%DC?s784IiK!n=#jSh)OtP zbk~yVx2R9(tn#(1XqcjDIb0cYiYR!M7AU;V|F$HjwhFi>dSmYOn?M+nmrT*uv-7+M z38_~>i0>SB)p*C{w04iltL7UDPIXLhF%Ouq9zaQy444U7&|4X4dRxPgId=oR>@v*z z5O+M`<7d%l@@cW;$-m(vasi9O48bK5!}nOfjorxqd(863Wi&&19O5|lo;IF7_8nm}Kbu>< zx+~Z6TvR*(KW+2oJZ>vAHSD~5@mu%Uzjst1TiLOr0aG%C&}bUwr?{<4F6*m4r2}d@ zQ>fMFJm7ELdJ%y1K^-)QO8OKh>gTicCiPiKGdh1k{#NV8H`pLg;h#WXNP|1=2{$;W7&}d`DUC)9RM#z0$ zEwlc~qnsCI5;N#yLW)}vb(h1nB}*5!l@!-ouStu0#KP`V=cxU^21+7DJP`}7@AcYT zLPqJ@Mvy9RuZvDncuO}Zb!W>RxU5ZYmWF<&73Qtw z#hYHE9JO8kfo11^{mJHJ;h%9SM3HXsUvgIH@PjqR{0ck%kz*qZKBofDBZ18(JI*~0iA}tk>v72fU_lg!y|BB|{O->`N?jYkwB@HFgLiaOLUDB}l zw-l^7SV*=Z49>Gk1)luwBTwq!`%F7v&T4~9n)o?UpU5yTz*?Q2IDEGAc5eLHHZA%n z8?4VIQ>*xS*jyosmHv|p4{d7rXN((JC|-51zR5zm8|u9GGD2YQ%m+uoWF6xar+ho< zZ~;$8*_>NQaFX70!Yype9`pIg-;6<)m<~?>AGV#4KVXU40MO-k#JuUPqW5!GIh76D z15~N)I4Vdq8H)XEaNI>uVV)8#ne>je0W!EGb9BzbRp;+Z_(Sr$LY>mGzgnA>-v6sesVqJ=_bm-Fxymp4BIH_w3UovG5GWUg?APMvGj;(@a1= zAudYJ#migO&%Q>6Uq{Wh8ymp1jZ$B%fKpAT@B9JE)IUs3#k(*|623iiF9;8(R<4t0 zyTC{KNVDzDQYnhPC%U@#Ru_1%UeV6ApQ;rL3Rc=1vrB4}FumO2y~bqAWH&aKG`WYr zSRJvZ8l`ooM%jW*jhR)-VX~r7YK#l40BM6SiZkjGr};UsQrYeRLKuI{RvCP;`{4dpB9p zGyc1+17_m^MAKMr_Y>7S?Mf3(cGIzhdwtC0;-$svunKQp$!38+Iwo)&$^kO~P67k| z;_qwXFhVvf95s-3m|_Nk=!WVE`HGgDGr`EWczu9wGX#u|gjXDR#`B(^m$c;yUijMlQJ-U#tb4Fg>SyFL4id(Fm-X za3dupq{_B<$$RZb`-+eDfA@Uey!+?fyLTV{@1zgfKX2aRqZ7~(a=rUR|5crv=rcgl zoX0J{e(OsxwCDUiF;GgwVv2;f;NpLpmeT+4ywBTz{*n0S>?sfKs#KRJNO-l|Zkd2< zMdk;6*K{fDP=BeU#yUL5h+r29EX%Yk7ennd%Z3+1Vc~EXzjf<)m#gep9 z;g_wt9JL$WtZ3~Y`FU_6bShk`4^-?$4OuV7gD{IcaSrMQ!uVZmk4ATEZkZxz$c~q1>;^@2Yx)89HMUeSQ4KNaK2M4J1ItD>J0FfL_w7IQUEzKZvI?cn72_(oGQ z#1St=o2RQv!WTlgQJVMk4FEEd47C*9u}td5$e}}X6_AyZ>*JYqDb6^U~MaTy9>0ScrwW;oa42GcQ`0L-Wx%{Cy+TL4(?aZ77 
zh{YYWCOtxUf89$OwqvVtddQ|*&c@5;a2Cb(?k3JbeZ9%T=+5H@5<|FBPSg`ye^eNf zw&ctX{@%*s2jM5^ze_QTTa_OwVFA=@L-pNU;I%;+^V1yvve+|SIOdsXPZ zM}7>h^5&(Gm+UGugfGxI}%2A|66NUc)VN(5@~zus-=;wc;>1@3$P(_SWttPyOS^HscYGUzOK3QJPvT zN0!NQRVz9hki`-4LZN6?9%RFS{h>b6%fi=!=YV<3)e>OjYc3E)6A}_-vdj;%g`=vvR&0(Kca^TF+)**20*? z%Q^ty=UNaw!s3!qo}H3!YKw|Cs}S6jUt#f*Q^iKA(iSc@Q_^7QcH`e#1PBh&z87N9 zFe$s`TP`l^w_KvYV|>A;YwnP%gJZfy_{0YUunqZKN-Iyo6};#XZowsSqUXArE}-LA zw7xRGT@AxgmLQFnDNE6q;a=bBSZ#DgC3Y;ZUeccwTIz|$VMT%ml<O`YRwQFD$QDg;H(ZG- zR?j&{7wQ~2g?CN)yr5(4?)8}D3|O0Ps`A`%!N_9)${hCYGMV33XTSxrifo|fA<$+7 ztnEH)xXs;&-t!`)(V&&+ra`mxy8~4~XWTu%%}C1$ACg**1}halntrYn9>Sol8i>GG zf#?^;rIjLuLA6JwO9#{mjnP&eSb}HLlPz#eQDU9bT#Rk(_uQ(T)BHnJ_Tu_4Fzpjbawet<$9aXyrnf5_;v@qGx$EjKQelidzf?u?!jKZkgd2D^xMb6ro#8 ztiD=sS1C=%l_=Y5wbZko)gD4t=8qW5BN9L9t++shhc~SIRevL$SnO9}6Xq!5+_+xi`6i0s&RciuIwN#BVsS&j}Eh-;vQ-5_%m4}4K z)AJ1#to+#%Kg~){FOu|YWf)=%#5kPG8D5}RD5i)Oq_bZZ?o?cn_T&_Wtw^W~d7MLW zZAKU@o4#WgLg!Xf-{eoeBf8oO?m^;V0kntbAxOzfIWPlntMMiHCic~J|5JxB{@bQ1o2MBU3s*0jpe{_QO6q`v_E1N6Im_F^j}< zZ_)o;>!G|vU9$o|IxY)S1NET)5BG1D14bet-fv@VwTm-Kb4{7?ocmGL3Gh;(?w4_P zWBq+;i-J}HV5?a4Lxx7rj%T?xrPvH9kWGIx4nPo=iNYH!7DFn}483nobMj5}(Dt}t zAM2DowAS3&9I$s{=piLu!gGu}AJEqHn^hu!Y|g2!KIp#BBzP`*#QggBsetKaQlB7^Wr`b9T?XEkrq7sS;n7k3tGI z>RT@~d3{-pxfcF@wgjFJdi!H)r$SLceYD^vx1yygY>PEPotvufCNyM<9w2;P@+fZ?`k zmY42{Js;2PsK?~T#8N<@Mp8Q-x}d1nc;g@~NP}%>R-;9MSYfCh(^;mHITHOPWG~^p z+>wP3EeeL}OgGsETW_-w%>xy6v~IdE?LmX9seOOBsBuyBWz4xuTf{zbkWQZaS2R@R zFMEG+Kj*~TN5X!kg9OYK7FX?2CL$yo0*?`*st2&lNoQ=}MX8eQ&dy&$R>sV_L|R^y z?o)i6V`x=uzbnx>I?e=$Wj_D;Pl@7xcW?E^-V3^f%%A7YVB0 z0!K@fzH3-xd@8)=O8iXkMnQYpoCf{FqC`m z<){w15V0uN(y=6@KHbfDh>2_u5e-qb$M0irv4;p_$~gwax07q9vu_*WUn^PMQou8I z7Aw_H=~rHGo0k?#qI=Z<%?`otB-v52Evqf~Y480TJ%x1BbIAKHU(tf$t^S{k)s%xC zsPMvU9>=rP$=H}8j2REYI7@MY!d}rNJLC@s7ljWN=MJrk8yrqgY^~agB0sbkra8WlaAFi`hBsqVxZ1X&>lXKI-mII%!_nCzR`tWY=5Q80LO7LM=3b-v9fElzb zrH%2d+)^5}HQK-%{Z-H(zuszS(_5ii5#W&AN8P>`hdbbk(kGkUW6Z@7VQAm|o%$@V ztt#JKvhZR0{_~Eb7PMcz%vV`*Kbvd-Mg)myc%R_4i4Y3qz0mn8-|1120yROXSS;?w zRR+ZMklG8bysc}`{H_VtZ+T~gXEnXM1_Mp^D76Xuo0GEWbOJ4z2n|VFJ@=A|yU0u0 z9l!fBw`#0W6|!;ehK`_E&bIKn!NOjbOIjD#JnGga>evM+>E-btY#QbBj6f=A9um{C zJ^||)6Xu`vfC;fb{r1GSF%mELb@%)p>Ea0MFJf`>xdVKdMPdI<9}S);jT*ivAM6xR z;VcVX4jZ9sGlKr-MFPE7TaK;lu2O#JS7dG_sA@Y0(X(Zn8@da3M;%ji-l!V=^G9>(7ww^KGQ1LnX#(xp>H$f2*5HOWy#x;;U-9{@5nkpQ78bB zglhGIY?e(q0va6Gi!)M1DYW3Pj{XSpkscXHIYZN?$SGIljlF2%0W-MZ8u0tX?Eg*AplJy~~D z9y^1vaN3bU^X2ovhlU`ea*NytRq@~7?Tb%*s-cfc)uLt5NzfvDA-ej>W^&Hb_a)UZ z7WX*(>ReOv3nL4F=A+<%udFJNPsFc49PrU6Gv1@dP@|U(p{?XWZryxr*@S5K_^ZIE zPu45MGm=TOltacL!(8(j$u~9bAOU0=0$NW&?=3^hi#&_*EJm)QpQWAS+DUB7n-1z- zB9^(iQx7r|%qjC%+gyJ!USdI0X)-sS9>+lgI|7&&xKfx(Wy7la-wp zDEXaubm_N!0$9LC_!>62IxEHXhb)9E&CfKq3A2{v^nv#zZ8WuYSbU(k0}6^aS3g7V4D?W^)_5KuwJErVw9fU;c4se326l54DD&H(O;CJ%E&#(?lkK%Oi zD2@e=rcL_&$Wf2ugN6F4sm{%EQ9h${Ju?QqbX3^EM73j@4K&G`_9a4Am(IliJf(Ss zM_(Nz9hDVg!XlG`@l6tI`glP$tLN zpi_~iVJgUrGBnxdD`Dsfi7&sj_rp!UjN6&I2%T09OYGS!v2MYRs^J{8?$+)^d3N{Rql&JMwQ8q$W*Vj^*EDcxtZ z66%?Si>WK-QIL_VNk7C8DvolB-RN^b1^d48MJ2#f-OV_ID2yqXKI$w1U)#-}YXKY3 zY}*#8MY7&s?Ms|}sbV1#WypL9#Z|oyIp&wc&|GB`R)Wwpqd?f&3{HG(VxGpm~CFadx@AHAKb~tc#xgrUnLKj zbYbdAi}T%JB0E<%I!UWes{Yno9^>FO4e775Em~Q$Z|>ELp0!cWv4dshxjm1PY>E3< z><`?Q&tEyLB&g6-)+|G-nhtl`fK70<%N#pqXQ)oYuECivXnw?0{v_+{>6-7FHANOl z`uppM*vQE%T3Ll31FS}f?uhXjmsrkooD`*m_*vAZ$9pIW-R|9&DH^3%=~00ux@$sK z>zo6m6bp_aVBd`L8U=9m3k`;ngL3;EU$P3>+(-Gp^z8Jf+TZ0pLDnTE8!Wl0t<3z(a49$|6${T(L{K-IMdEAP^uc}MIyPZhJO#fg?m{v6w4qa^g> zoG+Uf6x*G)H-rxLrv7loy$IE&rC1e=$?qI~re5lvou0G4yKMtV=1fDTrh=Bch!Woj zECcO&(9g9oy{6P2v$Uu|Y|c>aotX#QS2O~zl`_97U%K~cjMb3tdRGlSi|GYQfr2L{ 
zRI!u2WqYpDS)6qtt-FfjQFp!8UuT65)br5o7fYzzW{AU|k;Xfl#VyP~GSjZUDk;ZK zsTJ-2)m|)TF8u1BV&tmLImCxhrSWoqZ*0^S7ixC{u(Zf~k3&`St*Kd25UPB$(mJ;` zyLqa{;OP=-4RZ@?pdVXvSXL#BoEPLn`E#>+t&l$CMybullvEy&6Fw;;?m@1@OU;5j zrw8RmS-zhIGh4029mscKF(;13VjF(yIQ5#%i-?DAavthT`U_FmpIh41o?!gPsf2?t zeWQvqW5YP2G>b9d-LBSYGv_sDQy-YU7V-g2{r5P9yTD9M0zoT62{NN{byWH?*uJpKiN;)d^BP;<$bqU!^i+f( z#vjLT2{#iV!;WNI{0Qd=oeNbC(|i!q&yL2M6L1qdiJJEfp0tVd*z+lWCP`HTq=Zr( ze5=xZB5F$TEf_ z21||B352_28#Nx2;h4suRSs6OO@Uj|IljUwk2&OcZ)2sRrh*G(KTyFcaZ%ihhFCA% zo9djs&I_yd#iUsJKbrpZ$xL4pKfiTKeD?%OlYn-Is3iHP1{U-CLo)@k&K0>})R;L8 z_}VbuQ@9eVp${GPC1>p?+c_x~ZbcnkVvfD;-QTRCWV(?ajTQ8c8+zdY#0XTl=}WR(%N@~#JCY4sSn>2CjmgjvDVav& zMUrs?N;uq=&%R0OR5T)nfGwq@q$tQ?C=lt^La27QWDKuEDTq6#uCb|<1FNWc6BdDO zsEGrB`+wB(3OE5sm8Zkk79h!=)l?D05wC6Yx?EYc3N=uIt8F8 zPZO8K&IYp--N7NdxiQL~4}XhZIQyfHex+kdcKDlCSXnCQQRI$lh1NWGj}7Sa*euD3 zY|>>Mp1s|b*E>D11pS$WW2%&fEV{^ev@qmZlv$37Yi^k59C zVFuNn69>GaE!<-pFRaCLE*zDxa+nTWd4WnfI3dFlF4svsLAal9EP4Dsx? zw0T>jznfN^bi(5r^+eudbji};%4RAAV7vz~jRtNU?@qX0B45!=f!oK|ZZEX6UIC)F z&l2Tgi%zd-oJXpH@Qv%I!4+qTx5;WBnw2flEl~!O%*yrzXPOvbSy@E5$#&Ue?0A%` zaw@Bw0)FDE8>m|T@HXgeBIXmU7F;(*M^A5QqG@@?73Z>=4b5invm2!ax{;gXXRaid zx8Q8FfwH?lUNPLc?q;QF+8Y(WNTuBBKLsUTQ$7izZiusX_n(J(r&tmCR00V>1a#;>9qf*^5;p1RL8G|IoA~O-Z=w zEKHbRDJs==cimbtePK}(g^a2ZG>Q#lp-eiFYQbjGrqdr(Ii`mRUiP^N{mo)vI%2$2 zPJPVPpEc=cjM;X zzR1>^Y!kUIplH{{ml-sZHEiUv^Q>1;Sar?#GdIr|nn&x*=>ldcBiqB$ z>ZIsJ7d^8nCUSl;;A?K_riQe9T8FHC$vM=J`Au3MN%a|ua{Zm%yznCRUS^zVq9(F? z7BRTfi11-3^X<_os<>jVciXdho3zBOPNkIJs3io-v)(oE-+>->3Na|@KV#Yzhpe%M zOvu|6^QJdub-toai}#7|dKk8I9~pD^(WRn{?~F25|L%9B)Ui;Tk7@SNi9JUKlH;0u zZ1T@PO7;=HSHgHy&?yW;uZ=U5t~N#YbW6l}G+F;$GT;c|p4Bf)@c~s2P3*Z zD8VL_c$aQ@n<{KF0D8|(a;uf{%gP2eOVAnjRV@_6v4?jGRES#z%b$c#BE~Nh8%B&X z$|}D3Sdj+6PtSyiteg_;EEzM&1SsR+63HK| z{ERj%G*at**m7X@tquJAcc(A`H9^Umz4N>tL7~xE*|>Abrrh3I`=RyWxODm(*z!GV zOy1it4VIdAF?C_}xEpFRm&Hhp3Of=>u4@z)Us1REp=DfF<@4{E5mq)k7PxYoVOXaT z7QL)!wxM(<=ldl-*n;2%OaTeeL|)*ROU=saGw#ulvvjsc8;Z3*CsgfcP-bU_Sf;E5 z<=?~?gy*$3H*WwN+rvv&PSo{sl{X`eB(N2y^Wm+!%LiZ7rqh zffiX_JJaMxG&Ci#_F|oxZAkh6i?|yJykk}rkQr@OvTxSey5+-%!F3@~YVFh}zvI<5 z{**WqUQRZ!)emgJmoLKXoR&Zo(XLe{gK61Im-e9Rq1&{jE+m#J9@h=ZGeN6~pxwOT zinm);K_51XIri-*G!aC$nSxvC!jQU~DudbY7FUQQmLeGC&`(e-ZBy+|R%dUm-7J1! 
zdh8yXt{sXEV$_DFbdFReYV!;4b;A}JC!nmCvQ$v;!SDfeSWj>=nF16|=1(HiwflJk zBgJ4dj^dhd%=ey7Gq5ZM569Krh(xnKm95zgL|>76BClQ4Dr})2v{=HSutSvuldfN_ zE*(BioYwW%#pvqamF=I$tJ2h`UV#eWt`=1KFo2Tbez3HS9u#xJn_B8fa=#d%&Ca%G z`WpXnKavHD{lGmwRdpw>1!1tdQ7PP`IZ1P6btzcdRWh1J9o#UKD}7J)RMbj@iv*h| zDD;WO{>t!hg=4&o<^76TH@nm)+Q6v5UrgptGBFH7#6{9$VKk8U%k#G~9a*^#%5i?n zV?6T_j!S6(CAFB(-%(wv<(+)tWrB|vr2%!xIcvSkO3$}9&iLFmnr;Cfd+q!aI&=y- z_VCq1Q6N9!;zoyLXHghY=Ht>!@9-0WLaxj}#_*-#_rB3`vFd5y{|7(U*$v`m1^GzDWE^Xz<<>pL?LSd;Zw?64EW+ zJG*zoP`NTVJ+7<56nqy}-!GAPMVo*UtEt_2{_rv*7=(I7+k5`_tn>Gg_QmwqE83je zE85H}+UNuJbHyv#-7DJVOUNslT}#l&|0UuI#Q)zumsd2}-vO2Xm6~~=c6_8(`?c;Y zK$%#7uxda%`-pun-v1(3=&<_T(J$xY?C-$RB^{pD?;f2Kkw*bpY^{xkov7b8FBW+p zk2;)bgKJ;WY|dYb{ND*Ys%<^XzoIo5cJIHUeLM;}%zH(vJ%1_uFM;b7P2es_{mvrP zA?W<2Se&RT$n^ZB^dJlcp7YdS(XyT-$~v$m{z^n%6SZ_<6eVpSNnaRJwmycs z_Ine0UYw~lW1htrLVu;qU;4)O5mJ&ZR;P&Ir*I@z<=}}HzS#_x&A6recEo?W>7`BU zrl2SkRi1ne)Ga+*a#A%tenkUv#P|h_tLCW9#~$im!HR)-$Z57|)gVPWDHn`s_34Bu z>7N~GaOHbWP&#ABbDj5@r8^?)n-7pQNU2&{6q zd!q0&nt@iwz~bWb7*mC`G7>PA%pSqXgf!K5EUIFQdu&XzdEIbs*))3XRzxcj1U1R2 z3PbjIvddM{41AB)nK9`<=u$NmSGnh6pG0p5t5J`U_b~evdeMy#pro50WEnL!7)i!5d zsOZBZ3|Cvi!<iTG?xj{yLf8c3Qf*B5{56|E_#wT1GIqh3xV(uIu4n~DbyGRn{+ zZRyzJ?_0Fj%YgZHTNZ;fa4rLK#>la5FD?6u#(E}~fX@N~@1v}47>0lux#$Sy1S zpqJclEhN+8NVa^pVp0_Zz;+1E!;{6yGPUU(}3O za%Xb1&K1Ga)8~M1w_5Qtz*Q-ItfBLNF$)b{DvN1)H8=wVTXf;;F%-K+W4PI!MMBj3rR= zE{trX7>W|dKoy7j=G61PqM7K=u4X3{mDHh7GERweyYu7?Ze&QNH?0r2V$oKFSB*@q zvjeTA1YAzZ`+{<6UZy>)tAM(K+Hb#g_=kt`?X%lS#EVv;ScvGMcW!M64JI zxQPWoCJ@tO!1nEoq~_bnB<_^hcW=Y1S2QjkYZ!J^P_#{?9iq4e*B>wSW8tXMJE2Qe z{LJNXUSY^0s)O#l69^!sQLv*(E4sjPL3GS4L59WdpvKNF*eGt>9sof=zP~MRC7{^_ zik#CN^l}h7 zT1al~)|f`a4Vs&w=zn|ibjZ=|PgZd7;g%_igHR(R;qid-T0@HVn2Mu-E~A|WSHfU1 zgj4z3Y#s#3eHI`EjNiXd$?qy>FlLC21Y?Ls_oF*HVlO0oi(+^E@kAa! z>DRtBB8=@}Y6*3;A~g}@qtKSg%^lEi^@9SugeXw}M)>>|YG%U)s1c`taC8jgS>0y5 zvhtQnT9*aKUS{4(o80G%T3bRjHh@A-dUO`9J#$a-A5l`??%&$n)e|AwLo*#IGU@G;+*#w2!-doh1}=~BUZrc^i;ccp3G0Gb_^HVA*mM;=8}po0T#9aY zHbu;LeRpDR8!{5PkXgI*%=J)!dKh5u?TCNO1+%Xi&1zIa7iPbtMPLWmScn zU$}JA+V$}xb?jE!&v{%!Lm_;Dt*5^~2q{g!TD&^Osc=6Han;z=i@+{!hdU>5s~UY$RD!K? 
z42m|U?(17ziGK3C6BTZu$j-c_vuR)e#ieLL1y8NBZbDCFa_Rib+?r`yXYRrPCJ@c4 z<3%~RB7I5a=h>;sX(hEIqXJV<&acEHUFKF6>9svduOy^|vy9jN_$paHO5S}kjgf3A zPc%bpK&%k1!tM+~2|ok2&XtyJi$@gxlU{vg%sZhiFbfN7^o(hZGuzWt61|P-(FQj9 z&h*BKQQ?Mr+RgGZfA41%{%~~$tIlD&)Ry)~xs_`^F3$kwS~3(lql&AgD9NC9N6p~H zqK$VD6MJV2ZN4JID6T&Z;ntefO>9@5%WMkKP7}cliYfum2x`iZeQtLIOHTWlhYGl; z42!oe@vcUks2lim42Wldi1}9QFTWOZ$Z0WAS6$$WgeU*_t_0B~H6nUPqs$2$PKM4U zX!gwB_RJ>lybt;+_?X~&PnITbvU4!PY#V22rB+l2O%p&>mzGAvk6M!#HxPppfkdjQ z?q_1afb^?zN>`w?5B5DN5S215dms> zkdHt|<9HN&V1iPfy%rW7Nb{-mL{eJs;dTj!eanLc+II$G@geYs=qqRYm7e+HbcBw7R3n@A<){rM$651YhTOh%S;<^i% zRovRGJoSK{E6CiD+r6&)G^BRd?b=oS`0Q@DoMlr1osg&C_B6!uMJdFx*>#yz6IbU)o^`A zSyItf0{<>5ttGHZA1$+fcx&F|#Wo}REg9?yDP&FHIg=#F8cvQvFxmj&46RDD9D9<{ z4n5?1uq@f<;Wm+isjbNX8LbhO)|62yrFu+}vJH+S)5(YA--jph&YTkWpg1rrwvTo0 zPyGVJWDjTItZ|D=QD9RrP^f8%2$U0-oS=f-f)P>eHF2nYoMtD+YT8)UZVt#1Kk*o$ z&d|yztNqUL(9w9!#&C4v>BU~BM~$$)k##(xR$m_CpU^K=bHh7GsJ%;l>>f{jW|qTQ z_86{_?@~q(0563s>vNumM;HaXzofbKpy1jGpDKIG(p0NBAzW-y(TU7mN+RHp19$$E za~W3?Bx`lEsm^rGopnKgP$m`g`G5A%T3kWa)XDTeGWxp0j=RR`2%oQp|2bzz@Rg}> zql7)7>WOnN40UT74!dpCnIiHI^XIa7Y6SEtB=}qez|O(Mx6{|osgsRC8RTYR(Q+u) zAyjRxRmSCXp!ua)Pw0CbGB(5YPbC%oI3t)117f-?5+PLh&vGP4Ijx!26&gQSWKWzo zfaZf%9?+^KLf*7IC2r4E>RO;;#=_Yo|1$P?y+F3m8KWtVfSgEGq?KJ!lu_bWi3I(` z2lU|qDVSb8cyTuI({^u;^5GVRAE@(L@L7eUpju@f^s>E7+zK%&K+5R$P*{2MKT2%s zDhbTGjv_Evj_Z`K|L&!LS6D4MwU^xoe7riu~RVZ*B{c`w5RI+o< z`R=kn4=!343#?2~YvK$&WqHv^FaJxY`4Q|unR0PrXI79Mi94;q#K0Gq%Usxo>5soD zljOrz=?(flAXj(PJ3>^Ih{WE#gVFEs^)LgOa&hCm8PaPN0gO7)O68zWSLc?CdG+Xf zy{r)jz(?4hpHsndyZUP>D$ZcjVhW!#hDDi-8=@$bx9!5X;@D=b5_tYMT(>-p;NWp& z&pKDp=fsw8R3GQDK;dh%?84b26fVP2wYmW|wkb4cKpBZ?5K;IJFUutQ5p^c)-T@Qe zE1j_2B8-TivXDK@yNEei(snuzu)XVHT8=J@8(kN6)yQ&3r9M*K?tq}=53A5l@up_RF^6}Q(CgxQ7L2Uu74F}APxxPdM9%iN6GH& zWku9yWrvG@rj@VkPBU2Im%G?-IpqF|c5?ijJEqi_NRs{AnCvF)p{KZ^Tua|v?C?R? 
[GIT binary patch data (base85-encoded contents of the bundled binary files) omitted]
z3m(?<=Z+L4uwcUk6ldJp`}HPuliir9NP;gL$=?djVf2?2R%7rKTWN_P(JU{=OR1?Sfi^lzm<2q#6%TmKG0z)pY3Q!2HJ2)pqP^R{*@gpwJ$HaK=YGcN#yV~bJhjqj%Bm(w4kyj|fWi~Ql98RA!VrX1e@W48{U zjLIBS{$I-0QAIq$Zh9e#5oRI5fU#N;Uv$QdUZ`M4Of9a z4Fkk)xsHEc1YoN)=IuC=hs*Z6X;rW$*S57+*sx9f60>nABz*2MR6~f0=frA{bBoY{ z!wWj%;cThrv(&Hp52rozZ0O*}mv;?P<*XbEr-turX>z(P#DI<)!-V0<1|NLECi+Sx z{I1l&ih(CG7e)uPzgis@P^C9iE+>n_^cfI@OoC~j3Riqg((yf>?J|lGu%r2>V4bmC z(@;*`OD4q9$b>x02ju9EW983GJG%WW?nol6o|1Da-Scofy-&9757uC;RyfnZ-E-US z&#?e<-x_QUI)nGR=V}znD)^tfI--jz~76}So({VeWm5r=U`3MJW~$8eXoy6 zzFbN$8u)mqH|{U63bL3nyO4X07IQutdrz zjD-7VH>{;=cc<&mQDHg7_Xza^cvQF}t<}AUh(`Vg)vtgC`qFVcb8;4ctpFTGGy*U> zst?g<3RtE()=6-)zY%?ieFT0P83Pio$ORNZ+S5~pU^uKpQ%VXoB!V-Pu>PF_NK)nJ zTua^%zn{{Z(oNSHZK!965+%9duN`EP7dE40U^-B@z_+{yQ<9B+$tKS(Wf2ap8TXl<=BCk;;@!`5J|Ly!w*`qLW`b5p5(5JjAziF=)k}6a;Wn zs8aD{r}`%>_L-)eLWAgJf4GBKBCI^y5+-qLZH#LNMxk6K;v%s1=w{U-V? z|3E0(D#d)^vO9y9a=QwX>Zj=sLerDVQe+xFM(yf3KpYmT9ky_bKWXC#Yl+VZz~+X@ zm$U*zHOFJKYOGkpYn-qnp09a!=#G>jL1fPNPX$Dugp~7e=w+P--fw_sa93wEde>}F z%DNHw>Hy1aqKgH{GTh%@zrKEl&(DKt9K8D^bK}NRmFSBI5@e>FATIXXT9^b~A#h8N zVG$eo)}(#u z-m)Tf)ypD_h{_t}m|H3%YkmB}M!Z+s?lfd)0XxRPt=N7Nn^y0E#_Pz{d?9h zkw%)2bSQI!Ji+V=bIenj>_0~@%vR?@)py`MfI%5!@$WqQ1~lxxb7DcxH!gK{$O>6&^X^#K0q>&P5VexarO+LZg5^=UNIQA=FPnG6E-Z2iy4C9W=HTFgG zd$o>Fnwe16r$vd;e$xJpMnI}I<~mga`uOsdMUp2&saXJ}+7&B8C%g*q0SoLf#$ z7O=_(qW(2GF;f&UoedT9i>5HlPnq=qm>q~yr37TjaTrJm4u8_OB4r6}Rqt*vXGG;v z8ENHR21Ez)o`-PjzkU62|I-d3#8F5(s!w~`wvW^*y0{KW7?!Ly*c?A+Iu`+DMWE6Z5fIqI z{h?R?{BIP$_g9ip`mqOZWgCc0N_P(Wk=0%NO*`v;NgmgQSALMA!3UZR`c)Pl{YatP zLqh+Eh1$IN&k6kjJj2edeZT4gyRf?xR#Vf!M|-g$a=J4)jLWw7ct*gVSKk?B zWs-EELu+WqD@gq5D%Rnu-}vhm(-|f3_biZnO-_?IUXV2MQfO*EQ#-^SH!@`XvxL26 zkR;%Z=|=ACSW{%NT~xoH-XIQW4c_7T@e6A3@^Hl%QNt<~*G<{@r^XXAdVx_Q*=+c_ zH)W3Iv>$1g?vO<4G0uhted_QhY>6U?&5s&dTfNEH+|e)>>e?|7i+!cZQ3QwGwL;h}LjgAR$Cr|@_1@#Mm^i{+#xn%$lu zUOhAMkCEzv3zSmEe=n81bTzV_AMuj2l@#*P>jEOJn|4Br5;;V&y|PAoKx*<-HYgRB z{NX}e7UnMx(`ZY4Ostf%paj&JJq=SUT^P8S(3o<2a_~9Fiy>+>d+>_aEYt8;KeQ#l zYPymaiW#imWQr<5I}<&py{hu>xq|x+MI-9U^&Tn~q|2!3P=2r&;91vN-L@P%jFTG# zPyPGg+6B13Dz{nOloQN3%Y{7HW4zU%I)x7i;7Q-AvaF|KhOcb)!0BId6Csfsux2jm z2!T2J_X=H9Nz-revZ#qo-SW%WOs2|~s6o38FDx(f@m7$#Q|TERP3xNC*`-lyWB=E$ zF@mR*L#P5hfz4o45313Jq}FcKDtf+xFyK?Z$ZsJUI^qSdPLhhd$W-eLY8Rv>@RcN{ zXrwoZAfi>F0mq6d%~-m5#1A=ec*2a3QS4_NN^y9otp#j(e*T*m2=mia@ z1C!IL-#O2h#5j`-8~=Po4GNup0_0SqXRx!9qE>g6(;I5pSgy(4)r}u3Gw;-cY%6ca@{|zR z2+<>bEuvGYP(v*^A7NQ1ucF2_F?Nno;*%Kr;| zHff~qAB=nX@J6<90D@RVMatn)qiD#g_{`uLbRhH4k)T`DPPuQ7loA*73Q$e2Sm#d; zgJVt1%+X-66RoN`WJ`Lx#!`O|oobC(;-IpSdO*)0)mmncc+boTNh+@;{!4xJ$48qe z;)f)!?C4F9Dm)^gm`p0pdq&e@Pr6}D3Xyfd&Q~N-brZY?qs;I$&N`6}g=hM~^@jFH zkd)2;UM@5;QWM_zEYO8;P}y$T(mLGnjLN>EBQqmh#FNAA*a4Fr6^ppCMGW!Gos}I$aIuPN&WTZYGUgr2w|cP(^)J+~M*AOS=D0k;BtI zuKA?|N(bXW&Sx>UMxVVAk_6X0eZS768tZP9iG4ifaz}}LnCRZD`oEPO8x?v(Q^Y1% zFN)kL2BLh(Gqx>wvoEWJcm6yHWfJgBBu9~S$LqO%enHT0KnH_Dpg!<2YD@&^6=9-Ejk9`R4JurUMYNllIFM)UeJmTfn0AL_bmc}F@w7( zW{F5iHSTfhVrf_AqRIg=IiXxZN$wD64Y&1RW5ri=9OW;x>(C0~7I{U9jxH(F0`*D* zPh;P>RDLQU+%NwAcJeUIGqoWvKr=*w^zCsjAJbngK*>f1Tit}W;U*v&yLQq;AyO<8 zD@YT(yxaXF$wx9L7>7Z)dMN5BxZqf^TTGdYarU4hTK`O0?Vo5fOt9XTHx77CY4BR( z3=@#NFP1XqmbPTd<>=y0{C5DB_c@8=Zn|m*nh^c7g6ojjV3!=d;D|SAj`fk zkn2p87oXp|TMkp;pNfQ7PJ;rks=arDkrTGYZ?eU}6DAE?0zbZtNh=DYqkWLx8nmO# zzk|OrE!J5GNpIyEnO_)pw!rql7D6?dBt3~Hpz~E$YVEtCQ|SEZ1ObF5Gi+xKejUBJ zBXjc|!UH=q-+7f8TE^JoTQ7VT;VUBR(NwO-slo+u*MABS1w z@GC*dTwz=HDjKbFXIHwU>HRp9YH?BOCm;%f7aQseLON84bw5dZxBVRjpD)Rq+&{_a z>`|VWhY52Bk=V+*Fj|8IO%PAT52$M{UxdK6S!EWS-KV&wSYsjS2?~1(1))-y-GQt( z%n(|m_Dc)M#~49@cb z&P80Rr;pN}ww&Qv^pc9Y*Ab)8nf(n2H^Lt}<(~frR6sIg>FIGtk0+pa2jrzzX7DVZ 
zEYj9UJq=Hzd7~`wJ#&eXe$H|Wb-l|X=dVt*@?Az0W#>cg;2b#pbYn~5M~{)TG`^b+}@!bFSWQ zB(9K>z%zvXP2}`1>SqKX&8(fGa`mE+W$q!OMi5rFH_K$K9VTS|CQKJ`@Tj{Xr2If% zs5$Q^>0@}+xQl11V|KeIK2}$8%8#?B?H|x#(k)lGn-WlAS%XwbRLt@HP98wSL3loj z{4=b>Wi0CnZm8%r7Y63ixpb*Wd7*?^oXT!DpqCnSqo>j>^P7CZLX2&?%F_~2mTc%R zNuv@}L-Rywzxu^d+2B#(Vp&I0kFf5AM+PywjdkFI)l(U3n6}5(sN0{pv|9X}^vIq7 zY~4F7B$=4s##Ol1cc#8rG~varB}w|Lk=X!-Yz=|JV!C<9!jc0;J&xvLnS!xp>PpDQ zY3(@C?)+L|Mpo8LWsbw5jzD zpO